2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
20 #include "xfs_shared.h"
21 #include "xfs_format.h"
22 #include "xfs_log_format.h"
23 #include "xfs_trans_resv.h"
26 #include "xfs_mount.h"
27 #include "xfs_defer.h"
28 #include "xfs_da_format.h"
29 #include "xfs_da_btree.h"
31 #include "xfs_inode.h"
32 #include "xfs_btree.h"
33 #include "xfs_trans.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_extfree_item.h"
36 #include "xfs_alloc.h"
38 #include "xfs_bmap_util.h"
39 #include "xfs_bmap_btree.h"
40 #include "xfs_rtalloc.h"
41 #include "xfs_error.h"
42 #include "xfs_quota.h"
43 #include "xfs_trans_space.h"
44 #include "xfs_buf_item.h"
45 #include "xfs_trace.h"
46 #include "xfs_symlink.h"
47 #include "xfs_attr_leaf.h"
48 #include "xfs_filestream.h"
50 #include "xfs_ag_resv.h"
51 #include "xfs_refcount.h"
52 #include "xfs_icache.h"
55 kmem_zone_t *xfs_bmap_free_item_zone;
58 * Miscellaneous helper functions
62 * Compute and fill in the value of the maximum depth of a bmap btree
63 * in this filesystem. Done once, during mount.
66 xfs_bmap_compute_maxlevels(
67 xfs_mount_t *mp, /* file system mount structure */
68 int whichfork) /* data or attr fork */
70 int level; /* btree level */
71 uint maxblocks; /* max blocks at this level */
72 uint maxleafents; /* max leaf entries possible */
73 int maxrootrecs; /* max records in root block */
74 int minleafrecs; /* min records in leaf block */
75 int minnoderecs; /* min records in node block */
76 int sz; /* root block size */
79 * The maximum number of extents in a file, hence the maximum
80 * number of leaf entries, is controlled by the type of di_nextents
81 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
82 * (a signed 16-bit number, xfs_aextnum_t).
84 * Note that we can no longer assume that if we are in ATTR1 that
85 * the fork offset of all the inodes will be
86 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
87 * with ATTR2 and then mounted back with ATTR1, keeping the
88 * di_forkoff's fixed but probably at various positions. Therefore,
89 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
90 * of a minimum size available.
92 if (whichfork == XFS_DATA_FORK) {
93 maxleafents = MAXEXTNUM;
94 sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
96 maxleafents = MAXAEXTNUM;
97 sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
99 maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
100 minleafrecs = mp->m_bmap_dmnr[0];
101 minnoderecs = mp->m_bmap_dmnr[1];
102 maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
103 for (level = 1; maxblocks > 1; level++) {
104 if (maxblocks <= maxrootrecs)
107 maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
109 mp->m_bm_maxlevels[whichfork] = level;
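/*
 * Worked example with made-up record counts (the real m_bmap_dmnr[]
 * minimums depend on the filesystem block size): if minleafrecs and
 * minnoderecs were both 100, maxrootrecs were 10 and maxleafents were
 * MAXEXTNUM (2^31 - 1), maxblocks would start at about 21.5 million
 * leaf blocks and the loop would divide it down to ~215,000, ~2,150
 * and then 22 node blocks; 22 still exceeds maxrootrecs, so one more
 * division collapses it to a single root and the loop exits with
 * level == 5.
 */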
112 STATIC int /* error */
113 xfs_bmbt_lookup_eq(
114 struct xfs_btree_cur *cur,
115 xfs_fileoff_t off,
116 xfs_fsblock_t bno,
117 xfs_filblks_t len,
118 int *stat) /* success/failure */
120 cur->bc_rec.b.br_startoff = off;
121 cur->bc_rec.b.br_startblock = bno;
122 cur->bc_rec.b.br_blockcount = len;
123 return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
126 STATIC int /* error */
127 xfs_bmbt_lookup_ge(
128 struct xfs_btree_cur *cur,
129 xfs_fileoff_t off,
130 xfs_fsblock_t bno,
131 xfs_filblks_t len,
132 int *stat) /* success/failure */
134 cur->bc_rec.b.br_startoff = off;
135 cur->bc_rec.b.br_startblock = bno;
136 cur->bc_rec.b.br_blockcount = len;
137 return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
141 * Check if the inode needs to be converted to btree format.
143 static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
145 return whichfork != XFS_COW_FORK &&
146 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
147 XFS_IFORK_NEXTENTS(ip, whichfork) >
148 XFS_IFORK_MAXEXT(ip, whichfork);
152 * Check if the inode should be converted to extent format.
154 static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
156 return whichfork != XFS_COW_FORK &&
157 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
158 XFS_IFORK_NEXTENTS(ip, whichfork) <=
159 XFS_IFORK_MAXEXT(ip, whichfork);
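/*
 * In other words: a fork that can hold XFS_IFORK_MAXEXT() extent
 * records inline is switched to btree format as soon as it gains one
 * extent more than that, and is switched back to extent format once
 * its extent count drops to that threshold or below.  The COW fork is
 * excluded from both checks.
 */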
163 * Update the record referred to by cur to the value given
164 * by [off, bno, len, state].
165 * This either works (return 0) or gets an EFSCORRUPTED error.
167 STATIC int
168 xfs_bmbt_update(
169 struct xfs_btree_cur *cur,
170 xfs_fileoff_t off,
171 xfs_fsblock_t bno,
172 xfs_filblks_t len,
173 xfs_exntst_t state)
175 union xfs_btree_rec rec;
177 xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
178 return xfs_btree_update(cur, &rec);
182 * Compute the worst-case number of indirect blocks that will be used
183 * for ip's delayed extent of length "len".
186 xfs_bmap_worst_indlen(
187 xfs_inode_t *ip, /* incore inode pointer */
188 xfs_filblks_t len) /* delayed extent length */
190 int level; /* btree level number */
191 int maxrecs; /* maximum record count at this level */
192 xfs_mount_t *mp; /* mount structure */
193 xfs_filblks_t rval; /* return value */
196 maxrecs = mp->m_bmap_dmxr[0];
197 for (level = 0, rval = 0;
198 level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
201 do_div(len, maxrecs);
204 return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
207 maxrecs = mp->m_bmap_dmxr[1];
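/*
 * Rough illustration (made-up numbers, and assuming the usual per-level
 * accumulation of block counts): with maxrecs == 100 at every level of
 * a 5-level tree, a 25,000-block delayed extent needs at most
 * ceil(25000/100) = 250 leaf blocks and ceil(250/100) = 3 level-1
 * blocks; the count collapses to one block at the next level, and one
 * block is charged for each remaining level up to the root, giving
 * 250 + 3 + 1 + 1 + 1 = 256 blocks of worst-case bmbt overhead.
 */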
213 * Calculate the default attribute fork offset for newly created inodes.
216 xfs_default_attroffset(
217 struct xfs_inode *ip)
219 struct xfs_mount *mp = ip->i_mount;
222 if (mp->m_sb.sb_inodesize == 256) {
223 offset = XFS_LITINO(mp, ip->i_d.di_version) -
224 XFS_BMDR_SPACE_CALC(MINABTPTRS);
226 offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
229 ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
234 * Helper routine to reset inode di_forkoff field when switching
235 * attribute fork from local to extent format - we reset it where
236 * possible to make space available for inline data fork extents.
239 xfs_bmap_forkoff_reset(
243 if (whichfork == XFS_ATTR_FORK &&
244 ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
245 ip->i_d.di_format != XFS_DINODE_FMT_UUID &&
246 ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
247 uint dfl_forkoff = xfs_default_attroffset(ip) >> 3;
249 if (dfl_forkoff > ip->i_d.di_forkoff)
250 ip->i_d.di_forkoff = dfl_forkoff;
255 STATIC struct xfs_buf *
257 struct xfs_btree_cur *cur,
260 struct xfs_log_item_desc *lidp;
266 for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
267 if (!cur->bc_bufs[i])
269 if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
270 return cur->bc_bufs[i];
273 /* Chase down all the log items to see if the bp is there */
274 list_for_each_entry(lidp, &cur->bc_tp->t_items, lid_trans) {
275 struct xfs_buf_log_item *bip;
276 bip = (struct xfs_buf_log_item *)lidp->lid_item;
277 if (bip->bli_item.li_type == XFS_LI_BUF &&
278 XFS_BUF_ADDR(bip->bli_buf) == bno)
287 struct xfs_btree_block *block,
293 __be64 *pp, *thispa; /* pointer to block address */
294 xfs_bmbt_key_t *prevp, *keyp;
296 ASSERT(be16_to_cpu(block->bb_level) > 0);
299 for( i = 1; i <= xfs_btree_get_numrecs(block); i++) {
300 dmxr = mp->m_bmap_dmxr[0];
301 keyp = XFS_BMBT_KEY_ADDR(mp, block, i);
304 ASSERT(be64_to_cpu(prevp->br_startoff) <
305 be64_to_cpu(keyp->br_startoff));
310 * Compare the block numbers to see if there are dups.
313 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
315 pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);
317 for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
319 thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
321 thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
322 if (*thispa == *pp) {
323 xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
325 (unsigned long long)be64_to_cpu(*thispa));
326 panic("%s: ptrs are equal in node\n",
334 * Check that the extents for the inode ip are in the right order in all
335 * btree leaves. This becomes prohibitively expensive for large extent count
336 * files, so don't bother with inodes that have more than 10,000 extents in
337 * them. The btree record ordering checks will still be done, so for such large
338 * bmapbt constructs that is going to catch most corruptions.
341 xfs_bmap_check_leaf_extents(
342 xfs_btree_cur_t *cur, /* btree cursor or null */
343 xfs_inode_t *ip, /* incore inode pointer */
344 int whichfork) /* data or attr fork */
346 struct xfs_btree_block *block; /* current btree block */
347 xfs_fsblock_t bno; /* block # of "block" */
348 xfs_buf_t *bp; /* buffer for "block" */
349 int error; /* error return value */
350 xfs_extnum_t i=0, j; /* index into the extents list */
351 xfs_ifork_t *ifp; /* fork structure */
352 int level; /* btree level, for checking */
353 xfs_mount_t *mp; /* file system mount structure */
354 __be64 *pp; /* pointer to block address */
355 xfs_bmbt_rec_t *ep; /* pointer to current extent */
356 xfs_bmbt_rec_t last = {0, 0}; /* last extent in prev block */
357 xfs_bmbt_rec_t *nextp; /* pointer to next extent */
360 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
364 /* skip large extent count inodes */
365 if (ip->i_d.di_nextents > 10000)
370 ifp = XFS_IFORK_PTR(ip, whichfork);
371 block = ifp->if_broot;
373 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
375 level = be16_to_cpu(block->bb_level);
377 xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
378 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
379 bno = be64_to_cpu(*pp);
381 ASSERT(bno != NULLFSBLOCK);
382 ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
383 ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
386 * Go down the tree until leaf level is reached, following the first
387 * pointer (leftmost) at each level.
389 while (level-- > 0) {
390 /* See if buf is in cur first */
392 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
395 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
401 block = XFS_BUF_TO_BLOCK(bp);
406 * Check this block for basic sanity (increasing keys and
407 * no duplicate blocks).
410 xfs_check_block(block, mp, 0, 0);
411 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
412 bno = be64_to_cpu(*pp);
413 XFS_WANT_CORRUPTED_GOTO(mp,
414 XFS_FSB_SANITY_CHECK(mp, bno), error0);
417 xfs_trans_brelse(NULL, bp);
422 * Here with bp and block set to the leftmost leaf node in the tree.
427 * Loop over all leaf nodes checking that all extents are in the right order.
430 xfs_fsblock_t nextbno;
431 xfs_extnum_t num_recs;
434 num_recs = xfs_btree_get_numrecs(block);
437 * Read-ahead the next leaf block, if any.
440 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
443 * Check all the extents to make sure they are OK.
444 * If we had a previous block, the last entry should
445 * conform with the first entry in this one.
448 ep = XFS_BMBT_REC_ADDR(mp, block, 1);
450 ASSERT(xfs_bmbt_disk_get_startoff(&last) +
451 xfs_bmbt_disk_get_blockcount(&last) <=
452 xfs_bmbt_disk_get_startoff(ep));
454 for (j = 1; j < num_recs; j++) {
455 nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
456 ASSERT(xfs_bmbt_disk_get_startoff(ep) +
457 xfs_bmbt_disk_get_blockcount(ep) <=
458 xfs_bmbt_disk_get_startoff(nextp));
466 xfs_trans_brelse(NULL, bp);
470 * If we've reached the end, stop.
472 if (bno == NULLFSBLOCK)
476 bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
479 error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
485 block = XFS_BUF_TO_BLOCK(bp);
491 xfs_warn(mp, "%s: at error0", __func__);
493 xfs_trans_brelse(NULL, bp);
495 xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
497 panic("%s: CORRUPTED BTREE OR SOMETHING", __func__);
502 * Add bmap trace insert entries for all the contents of the extent records.
505 xfs_bmap_trace_exlist(
506 xfs_inode_t *ip, /* incore inode pointer */
507 xfs_extnum_t cnt, /* count of entries in the list */
508 int whichfork, /* data or attr or cow fork */
509 unsigned long caller_ip)
511 xfs_extnum_t idx; /* extent record index */
512 xfs_ifork_t *ifp; /* inode fork pointer */
515 if (whichfork == XFS_ATTR_FORK)
516 state |= BMAP_ATTRFORK;
517 else if (whichfork == XFS_COW_FORK)
518 state |= BMAP_COWFORK;
520 ifp = XFS_IFORK_PTR(ip, whichfork);
521 ASSERT(cnt == xfs_iext_count(ifp));
522 for (idx = 0; idx < cnt; idx++)
523 trace_xfs_extlist(ip, idx, state, caller_ip);
527 * Validate that the bmbt_irecs being returned from bmapi are valid
528 * given the caller's original parameters. Specifically check the
529 * ranges of the returned irecs to ensure that they only extend beyond
530 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
533 xfs_bmap_validate_ret(
537 xfs_bmbt_irec_t *mval,
541 int i; /* index to map values */
543 ASSERT(ret_nmap <= nmap);
545 for (i = 0; i < ret_nmap; i++) {
546 ASSERT(mval[i].br_blockcount > 0);
547 if (!(flags & XFS_BMAPI_ENTIRE)) {
548 ASSERT(mval[i].br_startoff >= bno);
549 ASSERT(mval[i].br_blockcount <= len);
550 ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
553 ASSERT(mval[i].br_startoff < bno + len);
554 ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
558 mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
559 mval[i].br_startoff);
560 ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
561 mval[i].br_startblock != HOLESTARTBLOCK);
562 ASSERT(mval[i].br_state == XFS_EXT_NORM ||
563 mval[i].br_state == XFS_EXT_UNWRITTEN);
568 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
569 #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
573 * bmap free list manipulation functions
577 * Add the extent to the list of extents to be freed at transaction end.
578 * The list is maintained sorted (by block number).
580 void
581 xfs_bmap_add_free(
582 struct xfs_mount *mp,
583 struct xfs_defer_ops *dfops,
584 xfs_fsblock_t bno,
585 xfs_filblks_t len,
586 struct xfs_owner_info *oinfo)
588 struct xfs_extent_free_item *new; /* new element */
593 ASSERT(bno != NULLFSBLOCK);
595 ASSERT(len <= MAXEXTLEN);
596 ASSERT(!isnullstartblock(bno));
597 agno = XFS_FSB_TO_AGNO(mp, bno);
598 agbno = XFS_FSB_TO_AGBNO(mp, bno);
599 ASSERT(agno < mp->m_sb.sb_agcount);
600 ASSERT(agbno < mp->m_sb.sb_agblocks);
601 ASSERT(len < mp->m_sb.sb_agblocks);
602 ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
604 ASSERT(xfs_bmap_free_item_zone != NULL);
606 new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
607 new->xefi_startblock = bno;
608 new->xefi_blockcount = (xfs_extlen_t)len;
610 new->xefi_oinfo = *oinfo;
612 xfs_rmap_skip_owner_update(&new->xefi_oinfo);
613 trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
614 XFS_FSB_TO_AGBNO(mp, bno), len);
615 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
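/*
 * Typical caller pattern (xfs_bmap_btree_to_extents() below frees a
 * bmbt child block this way): fill in the owner record and defer the
 * free, e.g.
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
 *
 * The block is only returned to the free space trees once the deferred
 * ops are finished, i.e. at transaction end as noted above.
 */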
619 * Inode fork format manipulation functions
623 * Transform a btree format file with only one leaf node, where the
624 * extents list will fit in the inode, into an extents format file.
625 * Since the file extents are already in-core, all we have to do is
626 * give up the space for the btree root and pitch the leaf block.
628 STATIC int /* error */
629 xfs_bmap_btree_to_extents(
630 xfs_trans_t *tp, /* transaction pointer */
631 xfs_inode_t *ip, /* incore inode pointer */
632 xfs_btree_cur_t *cur, /* btree cursor */
633 int *logflagsp, /* inode logging flags */
634 int whichfork) /* data or attr fork */
637 struct xfs_btree_block *cblock;/* child btree block */
638 xfs_fsblock_t cbno; /* child block number */
639 xfs_buf_t *cbp; /* child block's buffer */
640 int error; /* error return value */
641 xfs_ifork_t *ifp; /* inode fork data */
642 xfs_mount_t *mp; /* mount point structure */
643 __be64 *pp; /* ptr to block address */
644 struct xfs_btree_block *rblock;/* root btree block */
645 struct xfs_owner_info oinfo;
648 ifp = XFS_IFORK_PTR(ip, whichfork);
649 ASSERT(whichfork != XFS_COW_FORK);
650 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
651 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
652 rblock = ifp->if_broot;
653 ASSERT(be16_to_cpu(rblock->bb_level) == 1);
654 ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
655 ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);
656 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
657 cbno = be64_to_cpu(*pp);
660 if ((error = xfs_btree_check_lptr(cur, cbno, 1)))
663 error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
667 cblock = XFS_BUF_TO_BLOCK(cbp);
668 if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
670 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
671 xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
672 ip->i_d.di_nblocks--;
673 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
674 xfs_trans_binval(tp, cbp);
675 if (cur->bc_bufs[0] == cbp)
676 cur->bc_bufs[0] = NULL;
677 xfs_iroot_realloc(ip, -1, whichfork);
678 ASSERT(ifp->if_broot == NULL);
679 ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
680 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
681 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
686 * Convert an extents-format file into a btree-format file.
687 * The new file will have a root block (in the inode) and a single child block.
689 STATIC int /* error */
690 xfs_bmap_extents_to_btree(
691 xfs_trans_t *tp, /* transaction pointer */
692 xfs_inode_t *ip, /* incore inode pointer */
693 xfs_fsblock_t *firstblock, /* first-block-allocated */
694 struct xfs_defer_ops *dfops, /* blocks freed in xaction */
695 xfs_btree_cur_t **curp, /* cursor returned to caller */
696 int wasdel, /* converting a delayed alloc */
697 int *logflagsp, /* inode logging flags */
698 int whichfork) /* data or attr fork */
700 struct xfs_btree_block *ablock; /* allocated (child) bt block */
701 xfs_buf_t *abp; /* buffer for ablock */
702 xfs_alloc_arg_t args; /* allocation arguments */
703 xfs_bmbt_rec_t *arp; /* child record pointer */
704 struct xfs_btree_block *block; /* btree root block */
705 xfs_btree_cur_t *cur; /* bmap btree cursor */
706 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
707 int error; /* error return value */
708 xfs_extnum_t i, cnt; /* extent record index */
709 xfs_ifork_t *ifp; /* inode fork pointer */
710 xfs_bmbt_key_t *kp; /* root block key pointer */
711 xfs_mount_t *mp; /* mount structure */
712 xfs_extnum_t nextents; /* number of file extents */
713 xfs_bmbt_ptr_t *pp; /* root block address pointer */
716 ASSERT(whichfork != XFS_COW_FORK);
717 ifp = XFS_IFORK_PTR(ip, whichfork);
718 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
721 * Make space in the inode incore.
723 xfs_iroot_realloc(ip, 1, whichfork);
724 ifp->if_flags |= XFS_IFBROOT;
729 block = ifp->if_broot;
730 xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
731 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
732 XFS_BTREE_LONG_PTRS);
734 * Need a cursor. Can't allocate until bb_level is filled in.
736 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
737 cur->bc_private.b.firstblock = *firstblock;
738 cur->bc_private.b.dfops = dfops;
739 cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
741 * Convert to a btree with two levels, one record in root.
743 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
744 memset(&args, 0, sizeof(args));
747 xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
748 args.firstblock = *firstblock;
749 if (*firstblock == NULLFSBLOCK) {
750 args.type = XFS_ALLOCTYPE_START_BNO;
751 args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
752 } else if (dfops->dop_low) {
753 args.type = XFS_ALLOCTYPE_START_BNO;
754 args.fsbno = *firstblock;
756 args.type = XFS_ALLOCTYPE_NEAR_BNO;
757 args.fsbno = *firstblock;
759 args.minlen = args.maxlen = args.prod = 1;
760 args.wasdel = wasdel;
762 if ((error = xfs_alloc_vextent(&args))) {
763 xfs_iroot_realloc(ip, -1, whichfork);
764 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
768 if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
769 xfs_iroot_realloc(ip, -1, whichfork);
770 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
774 * Allocation can't fail, the space was reserved.
776 ASSERT(*firstblock == NULLFSBLOCK ||
777 args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
778 *firstblock = cur->bc_private.b.firstblock = args.fsbno;
779 cur->bc_private.b.allocated++;
780 ip->i_d.di_nblocks++;
781 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
782 abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
784 * Fill in the child block.
786 abp->b_ops = &xfs_bmbt_buf_ops;
787 ablock = XFS_BUF_TO_BLOCK(abp);
788 xfs_btree_init_block_int(mp, ablock, abp->b_bn,
789 XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
790 XFS_BTREE_LONG_PTRS);
792 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
793 nextents = xfs_iext_count(ifp);
794 for (cnt = i = 0; i < nextents; i++) {
795 ep = xfs_iext_get_ext(ifp, i);
796 if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
797 arp->l0 = cpu_to_be64(ep->l0);
798 arp->l1 = cpu_to_be64(ep->l1);
802 ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
803 xfs_btree_set_numrecs(ablock, cnt);
806 * Fill in the root key and pointer.
808 kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
809 arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
810 kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
811 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
812 be16_to_cpu(block->bb_level)));
813 *pp = cpu_to_be64(args.fsbno);
816 * Do all this logging at the end so that
817 * the root is at the right level.
819 xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
820 xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
821 ASSERT(*curp == NULL);
823 *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
828 * Convert a local file to an extents file.
829 * This code is out of bounds for data forks of regular files,
830 * since the file data needs to get logged so things will stay consistent.
831 * (The bmap-level manipulations are ok, though).
834 xfs_bmap_local_to_extents_empty(
835 struct xfs_inode *ip,
838 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
840 ASSERT(whichfork != XFS_COW_FORK);
841 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
842 ASSERT(ifp->if_bytes == 0);
843 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
845 xfs_bmap_forkoff_reset(ip, whichfork);
846 ifp->if_flags &= ~XFS_IFINLINE;
847 ifp->if_flags |= XFS_IFEXTENTS;
848 XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
852 STATIC int /* error */
853 xfs_bmap_local_to_extents(
854 xfs_trans_t *tp, /* transaction pointer */
855 xfs_inode_t *ip, /* incore inode pointer */
856 xfs_fsblock_t *firstblock, /* first block allocated in xaction */
857 xfs_extlen_t total, /* total blocks needed by transaction */
858 int *logflagsp, /* inode logging flags */
860 void (*init_fn)(struct xfs_trans *tp,
862 struct xfs_inode *ip,
863 struct xfs_ifork *ifp))
866 int flags; /* logging flags returned */
867 xfs_ifork_t *ifp; /* inode fork pointer */
868 xfs_alloc_arg_t args; /* allocation arguments */
869 xfs_buf_t *bp; /* buffer for extent block */
870 struct xfs_bmbt_irec rec;
873 * We don't want to deal with the case of keeping inode data inline yet.
874 * So sending the data fork of a regular inode is invalid.
876 ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
877 ifp = XFS_IFORK_PTR(ip, whichfork);
878 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
880 if (!ifp->if_bytes) {
881 xfs_bmap_local_to_extents_empty(ip, whichfork);
882 flags = XFS_ILOG_CORE;
888 ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) ==
890 memset(&args, 0, sizeof(args));
892 args.mp = ip->i_mount;
893 xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
894 args.firstblock = *firstblock;
896 * Allocate a block. We know we need only one, since the
897 * file currently fits in an inode.
899 if (*firstblock == NULLFSBLOCK) {
900 args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
901 args.type = XFS_ALLOCTYPE_START_BNO;
903 args.fsbno = *firstblock;
904 args.type = XFS_ALLOCTYPE_NEAR_BNO;
907 args.minlen = args.maxlen = args.prod = 1;
908 error = xfs_alloc_vextent(&args);
912 /* Can't fail, the space was reserved. */
913 ASSERT(args.fsbno != NULLFSBLOCK);
914 ASSERT(args.len == 1);
915 *firstblock = args.fsbno;
916 bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
919 * Initialize the block, copy the data and log the remote buffer.
921 * The callout is responsible for logging because the remote format
922 * might differ from the local format and thus we don't know how much to
923 * log here. Note that init_fn must also set the buffer log item type correctly.
926 init_fn(tp, bp, ip, ifp);
928 /* account for the change in fork size */
929 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
930 xfs_bmap_local_to_extents_empty(ip, whichfork);
931 flags |= XFS_ILOG_CORE;
934 rec.br_startblock = args.fsbno;
935 rec.br_blockcount = 1;
936 rec.br_state = XFS_EXT_NORM;
937 xfs_iext_insert(ip, 0, 1, &rec, 0);
939 trace_xfs_bmap_post_update(ip, 0,
940 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0,
942 XFS_IFORK_NEXT_SET(ip, whichfork, 1);
943 ip->i_d.di_nblocks = 1;
944 xfs_trans_mod_dquot_byino(tp, ip,
945 XFS_TRANS_DQ_BCOUNT, 1L);
946 flags |= xfs_ilog_fext(whichfork);
954 * Called from xfs_bmap_add_attrfork to handle btree format files.
956 STATIC int /* error */
957 xfs_bmap_add_attrfork_btree(
958 xfs_trans_t *tp, /* transaction pointer */
959 xfs_inode_t *ip, /* incore inode pointer */
960 xfs_fsblock_t *firstblock, /* first block allocated */
961 struct xfs_defer_ops *dfops, /* blocks to free at commit */
962 int *flags) /* inode logging flags */
964 xfs_btree_cur_t *cur; /* btree cursor */
965 int error; /* error return value */
966 xfs_mount_t *mp; /* file system mount struct */
967 int stat; /* newroot status */
970 if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
971 *flags |= XFS_ILOG_DBROOT;
973 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
974 cur->bc_private.b.dfops = dfops;
975 cur->bc_private.b.firstblock = *firstblock;
976 if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
978 /* must be at least one entry */
979 XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
980 if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
983 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
986 *firstblock = cur->bc_private.b.firstblock;
987 cur->bc_private.b.allocated = 0;
988 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
992 xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
997 * Called from xfs_bmap_add_attrfork to handle extents format files.
999 STATIC int /* error */
1000 xfs_bmap_add_attrfork_extents(
1001 xfs_trans_t *tp, /* transaction pointer */
1002 xfs_inode_t *ip, /* incore inode pointer */
1003 xfs_fsblock_t *firstblock, /* first block allocated */
1004 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1005 int *flags) /* inode logging flags */
1007 xfs_btree_cur_t *cur; /* bmap btree cursor */
1008 int error; /* error return value */
1010 if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
1013 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
1014 flags, XFS_DATA_FORK);
1016 cur->bc_private.b.allocated = 0;
1017 xfs_btree_del_cursor(cur,
1018 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
1024 * Called from xfs_bmap_add_attrfork to handle local format files. Each
1025 * different data fork content type needs a different callout to do the
1026 * conversion. Some are basic and only require special block initialisation
1027 * callouts for the data formatting, others (directories) are so specialised they
1028 * handle everything themselves.
1030 * XXX (dgc): investigate whether directory conversion can use the generic
1031 * formatting callout. It should be possible - it's just a very complex conversion.
1034 STATIC int /* error */
1035 xfs_bmap_add_attrfork_local(
1036 xfs_trans_t *tp, /* transaction pointer */
1037 xfs_inode_t *ip, /* incore inode pointer */
1038 xfs_fsblock_t *firstblock, /* first block allocated */
1039 struct xfs_defer_ops *dfops, /* blocks to free at commit */
1040 int *flags) /* inode logging flags */
1042 xfs_da_args_t dargs; /* args for dir/attr code */
1044 if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
1047 if (S_ISDIR(VFS_I(ip)->i_mode)) {
1048 memset(&dargs, 0, sizeof(dargs));
1049 dargs.geo = ip->i_mount->m_dir_geo;
1051 dargs.firstblock = firstblock;
1052 dargs.dfops = dfops;
1053 dargs.total = dargs.geo->fsbcount;
1054 dargs.whichfork = XFS_DATA_FORK;
1056 return xfs_dir2_sf_to_block(&dargs);
1059 if (S_ISLNK(VFS_I(ip)->i_mode))
1060 return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
1061 flags, XFS_DATA_FORK,
1062 xfs_symlink_local_to_remote);
1064 /* should only be called for types that support local format data */
1066 return -EFSCORRUPTED;
1070 * Convert inode from non-attributed to attributed.
1071 * Must not be in a transaction, ip must not be locked.
1073 int /* error code */
1074 xfs_bmap_add_attrfork(
1075 xfs_inode_t *ip, /* incore inode pointer */
1076 int size, /* space new attribute needs */
1077 int rsvd) /* xact may use reserved blks */
1079 xfs_fsblock_t firstblock; /* 1st block/ag allocated */
1080 struct xfs_defer_ops dfops; /* freed extent records */
1081 xfs_mount_t *mp; /* mount structure */
1082 xfs_trans_t *tp; /* transaction pointer */
1083 int blks; /* space reservation */
1084 int version = 1; /* superblock attr version */
1085 int logflags; /* logging flags */
1086 int error; /* error return value */
1088 ASSERT(XFS_IFORK_Q(ip) == 0);
1091 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1093 blks = XFS_ADDAFORK_SPACE_RES(mp);
1095 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
1096 rsvd ? XFS_TRANS_RESERVE : 0, &tp);
1100 xfs_ilock(ip, XFS_ILOCK_EXCL);
1101 error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
1102 XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
1103 XFS_QMOPT_RES_REGBLKS);
1106 if (XFS_IFORK_Q(ip))
1108 if (ip->i_d.di_anextents != 0) {
1109 error = -EFSCORRUPTED;
1112 if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
1114 * For inodes coming from pre-6.2 filesystems.
1116 ASSERT(ip->i_d.di_aformat == 0);
1117 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1120 xfs_trans_ijoin(tp, ip, 0);
1121 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1123 switch (ip->i_d.di_format) {
1124 case XFS_DINODE_FMT_DEV:
1125 ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
1127 case XFS_DINODE_FMT_UUID:
1128 ip->i_d.di_forkoff = roundup(sizeof(uuid_t), 8) >> 3;
1130 case XFS_DINODE_FMT_LOCAL:
1131 case XFS_DINODE_FMT_EXTENTS:
1132 case XFS_DINODE_FMT_BTREE:
1133 ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
1134 if (!ip->i_d.di_forkoff)
1135 ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
1136 else if (mp->m_flags & XFS_MOUNT_ATTR2)
1145 ASSERT(ip->i_afp == NULL);
1146 ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
1147 ip->i_afp->if_flags = XFS_IFEXTENTS;
1149 xfs_defer_init(&dfops, &firstblock);
1150 switch (ip->i_d.di_format) {
1151 case XFS_DINODE_FMT_LOCAL:
1152 error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
1155 case XFS_DINODE_FMT_EXTENTS:
1156 error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
1159 case XFS_DINODE_FMT_BTREE:
1160 error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
1168 xfs_trans_log_inode(tp, ip, logflags);
1171 if (!xfs_sb_version_hasattr(&mp->m_sb) ||
1172 (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
1173 bool log_sb = false;
1175 spin_lock(&mp->m_sb_lock);
1176 if (!xfs_sb_version_hasattr(&mp->m_sb)) {
1177 xfs_sb_version_addattr(&mp->m_sb);
1180 if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
1181 xfs_sb_version_addattr2(&mp->m_sb);
1184 spin_unlock(&mp->m_sb_lock);
1189 error = xfs_defer_finish(&tp, &dfops);
1192 error = xfs_trans_commit(tp);
1193 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1197 xfs_defer_cancel(&dfops);
1199 xfs_trans_cancel(tp);
1200 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1205 * Internal and external extent tree search functions.
1209 * Read in the extents to if_extents.
1210 * All inode fields are set up by caller, we just traverse the btree
1211 * and copy the records in. If the file system cannot contain unwritten
1212 * extents, the records are checked for no "state" flags.
1215 xfs_bmap_read_extents(
1216 xfs_trans_t *tp, /* transaction pointer */
1217 xfs_inode_t *ip, /* incore inode */
1218 int whichfork) /* data or attr fork */
1220 struct xfs_btree_block *block; /* current btree block */
1221 xfs_fsblock_t bno; /* block # of "block" */
1222 xfs_buf_t *bp; /* buffer for "block" */
1223 int error; /* error return value */
1224 xfs_extnum_t i, j; /* index into the extents list */
1225 xfs_ifork_t *ifp; /* fork structure */
1226 int level; /* btree level, for checking */
1227 xfs_mount_t *mp; /* file system mount structure */
1228 __be64 *pp; /* pointer to block address */
1230 xfs_extnum_t room; /* number of entries there's room for */
1233 ifp = XFS_IFORK_PTR(ip, whichfork);
1234 block = ifp->if_broot;
1236 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
1238 level = be16_to_cpu(block->bb_level);
1240 pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
1241 bno = be64_to_cpu(*pp);
1244 * Go down the tree until leaf level is reached, following the first
1245 * pointer (leftmost) at each level.
1247 while (level-- > 0) {
1248 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1249 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1252 block = XFS_BUF_TO_BLOCK(bp);
1255 pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
1256 bno = be64_to_cpu(*pp);
1257 XFS_WANT_CORRUPTED_GOTO(mp,
1258 XFS_FSB_SANITY_CHECK(mp, bno), error0);
1259 xfs_trans_brelse(tp, bp);
1262 * Here with bp and block set to the leftmost leaf node in the tree.
1264 room = xfs_iext_count(ifp);
1267 * Loop over all leaf nodes. Copy information to the extent records.
1270 xfs_bmbt_rec_t *frp;
1271 xfs_fsblock_t nextbno;
1272 xfs_extnum_t num_recs;
1274 num_recs = xfs_btree_get_numrecs(block);
1275 if (unlikely(i + num_recs > room)) {
1276 ASSERT(i + num_recs <= room);
1277 xfs_warn(ip->i_mount,
1278 "corrupt dinode %Lu, (btree extents).",
1279 (unsigned long long) ip->i_ino);
1280 XFS_CORRUPTION_ERROR("xfs_bmap_read_extents(1)",
1281 XFS_ERRLEVEL_LOW, ip->i_mount, block);
1285 * Read-ahead the next leaf block, if any.
1287 nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
1288 if (nextbno != NULLFSBLOCK)
1289 xfs_btree_reada_bufl(mp, nextbno, 1,
1292 * Copy records into the extent records.
1294 frp = XFS_BMBT_REC_ADDR(mp, block, 1);
1295 for (j = 0; j < num_recs; j++, i++, frp++) {
1296 xfs_bmbt_rec_host_t *trp = xfs_iext_get_ext(ifp, i);
1297 trp->l0 = be64_to_cpu(frp->l0);
1298 trp->l1 = be64_to_cpu(frp->l1);
1299 if (!xfs_bmbt_validate_extent(mp, whichfork, trp)) {
1300 XFS_ERROR_REPORT("xfs_bmap_read_extents(2)",
1301 XFS_ERRLEVEL_LOW, mp);
1305 xfs_trans_brelse(tp, bp);
1308 * If we've reached the end, stop.
1310 if (bno == NULLFSBLOCK)
1312 error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
1313 XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
1316 block = XFS_BUF_TO_BLOCK(bp);
1318 if (i != XFS_IFORK_NEXTENTS(ip, whichfork))
1319 return -EFSCORRUPTED;
1320 ASSERT(i == xfs_iext_count(ifp));
1321 XFS_BMAP_TRACE_EXLIST(ip, i, whichfork);
1324 xfs_trans_brelse(tp, bp);
1325 return -EFSCORRUPTED;
1329 * Returns the file-relative block number of the first unused block(s)
1330 * in the file with at least "len" logically contiguous blocks free.
1331 * This is the lowest-address hole if the file has holes, else the first block
1332 * past the end of file.
1333 * Return 0 if the file is currently local (in-inode).
1336 xfs_bmap_first_unused(
1337 xfs_trans_t *tp, /* transaction pointer */
1338 xfs_inode_t *ip, /* incore inode */
1339 xfs_extlen_t len, /* size of hole to find */
1340 xfs_fileoff_t *first_unused, /* unused block */
1341 int whichfork) /* data or attr fork */
1343 int error; /* error return value */
1344 int idx; /* extent record index */
1345 xfs_ifork_t *ifp; /* inode fork pointer */
1346 xfs_fileoff_t lastaddr; /* last block number seen */
1347 xfs_fileoff_t lowest; /* lowest useful block */
1348 xfs_fileoff_t max; /* starting useful block */
1349 xfs_extnum_t nextents; /* number of extent entries */
1351 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
1352 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
1353 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
1354 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
1358 ifp = XFS_IFORK_PTR(ip, whichfork);
1359 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
1360 (error = xfs_iread_extents(tp, ip, whichfork)))
1362 lowest = *first_unused;
1363 nextents = xfs_iext_count(ifp);
1364 for (idx = 0, lastaddr = 0, max = lowest; idx < nextents; idx++) {
1365 struct xfs_bmbt_irec got;
1367 xfs_iext_get_extent(ifp, idx, &got);
1370 * See if the hole before this extent will work.
1372 if (got.br_startoff >= lowest + len &&
1373 got.br_startoff - max >= len) {
1374 *first_unused = max;
1377 lastaddr = got.br_startoff + got.br_blockcount;
1378 max = XFS_FILEOFF_MAX(lastaddr, lowest);
1380 *first_unused = max;
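/*
 * Illustration, assuming the caller passed in *first_unused == 0: with
 * extents covering file offsets [0, 10) and [15, 40) and len == 8, the
 * hole at [10, 15) is only 5 blocks long, so the loop skips it and
 * *first_unused ends up as 40, the first block past EOF.  With len == 4
 * the same search would return 10, the start of the in-file hole.
 */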
1385 * Returns the file-relative block number of the last block - 1 before
1386 * last_block (input value) in the file.
1387 * This is not based on i_size, it is based on the extent records.
1388 * Returns 0 for local files, as they do not have extent records.
1391 xfs_bmap_last_before(
1392 struct xfs_trans *tp, /* transaction pointer */
1393 struct xfs_inode *ip, /* incore inode */
1394 xfs_fileoff_t *last_block, /* last block */
1395 int whichfork) /* data or attr fork */
1397 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1398 struct xfs_bmbt_irec got;
1402 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
1403 case XFS_DINODE_FMT_LOCAL:
1406 case XFS_DINODE_FMT_BTREE:
1407 case XFS_DINODE_FMT_EXTENTS:
1413 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1414 error = xfs_iread_extents(tp, ip, whichfork);
1419 if (xfs_iext_lookup_extent(ip, ifp, *last_block - 1, &idx, &got)) {
1420 if (got.br_startoff <= *last_block - 1)
1424 if (xfs_iext_get_extent(ifp, idx - 1, &got)) {
1425 *last_block = got.br_startoff + got.br_blockcount;
1434 xfs_bmap_last_extent(
1435 struct xfs_trans *tp,
1436 struct xfs_inode *ip,
1438 struct xfs_bmbt_irec *rec,
1441 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
1445 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1446 error = xfs_iread_extents(tp, ip, whichfork);
1451 nextents = xfs_iext_count(ifp);
1452 if (nextents == 0) {
1457 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, nextents - 1), rec);
1463 * Check the last inode extent to determine whether this allocation will result
1464 * in blocks being allocated at the end of the file. When we allocate new data
1465 * blocks at the end of the file which do not start at the previous data block,
1466 * we will try to align the new blocks at stripe unit boundaries.
1468 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
1469 * at, or past the EOF.
1473 struct xfs_bmalloca *bma,
1476 struct xfs_bmbt_irec rec;
1481 error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
1492 * Check if we are allocating at or past the last extent, or at least into
1493 * the last delayed allocated extent.
1495 bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
1496 (bma->offset >= rec.br_startoff &&
1497 isnullstartblock(rec.br_startblock));
1502 * Returns the file-relative block number of the first block past eof in
1503 * the file. This is not based on i_size, it is based on the extent records.
1504 * Returns 0 for local files, as they do not have extent records.
1507 xfs_bmap_last_offset(
1508 struct xfs_inode *ip,
1509 xfs_fileoff_t *last_block,
1512 struct xfs_bmbt_irec rec;
1518 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
1521 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
1522 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1525 error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
1526 if (error || is_empty)
1529 *last_block = rec.br_startoff + rec.br_blockcount;
1534 * Returns whether the selected fork of the inode has exactly one
1535 * block or not. For the data fork we check this matches di_size,
1536 * implying the file's range is 0..bsize-1.
1538 int /* 1=>1 block, 0=>otherwise */
1540 xfs_inode_t *ip, /* incore inode */
1541 int whichfork) /* data or attr fork */
1543 xfs_bmbt_rec_host_t *ep; /* ptr to fork's extent */
1544 xfs_ifork_t *ifp; /* inode fork pointer */
1545 int rval; /* return value */
1546 xfs_bmbt_irec_t s; /* internal version of extent */
1549 if (whichfork == XFS_DATA_FORK)
1550 return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
1552 if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
1554 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
1556 ifp = XFS_IFORK_PTR(ip, whichfork);
1557 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
1558 ep = xfs_iext_get_ext(ifp, 0);
1559 xfs_bmbt_get_all(ep, &s);
1560 rval = s.br_startoff == 0 && s.br_blockcount == 1;
1561 if (rval && whichfork == XFS_DATA_FORK)
1562 ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
1567 * Extent tree manipulation functions used during allocation.
1571 * Convert a delayed allocation to a real allocation.
1573 STATIC int /* error */
1574 xfs_bmap_add_extent_delay_real(
1575 struct xfs_bmalloca *bma,
1578 struct xfs_bmbt_irec *new = &bma->got;
1579 int diff; /* temp value */
1580 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
1581 int error; /* error return value */
1582 int i; /* temp state */
1583 xfs_ifork_t *ifp; /* inode fork pointer */
1584 xfs_fileoff_t new_endoff; /* end offset of new entry */
1585 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
1586 /* left is 0, right is 1, prev is 2 */
1587 int rval=0; /* return value (logging flags) */
1588 int state = 0;/* state bits, accessed thru macros */
1589 xfs_filblks_t da_new; /* new count del alloc blocks used */
1590 xfs_filblks_t da_old; /* old count del alloc blocks used */
1591 xfs_filblks_t temp=0; /* value for da_new calculations */
1592 xfs_filblks_t temp2=0;/* value for da_new calculations */
1593 int tmp_rval; /* partial logging flags */
1594 struct xfs_mount *mp;
1595 xfs_extnum_t *nextents;
1597 mp = bma->ip->i_mount;
1598 ifp = XFS_IFORK_PTR(bma->ip, whichfork);
1599 ASSERT(whichfork != XFS_ATTR_FORK);
1600 nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
1601 &bma->ip->i_d.di_nextents);
1603 ASSERT(bma->idx >= 0);
1604 ASSERT(bma->idx <= xfs_iext_count(ifp));
1605 ASSERT(!isnullstartblock(new->br_startblock));
1607 (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
1609 XFS_STATS_INC(mp, xs_add_exlist);
1615 if (whichfork == XFS_COW_FORK)
1616 state |= BMAP_COWFORK;
1619 * Set up a bunch of variables to make the tests simpler.
1621 ep = xfs_iext_get_ext(ifp, bma->idx);
1622 xfs_bmbt_get_all(ep, &PREV);
1623 new_endoff = new->br_startoff + new->br_blockcount;
1624 ASSERT(PREV.br_startoff <= new->br_startoff);
1625 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1627 da_old = startblockval(PREV.br_startblock);
1631 * Set flags determining what part of the previous delayed allocation
1632 * extent is being replaced by a real allocation.
1634 if (PREV.br_startoff == new->br_startoff)
1635 state |= BMAP_LEFT_FILLING;
1636 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1637 state |= BMAP_RIGHT_FILLING;
1640 * Check and set flags if this segment has a left neighbor.
1641 * Don't set contiguous if the combined extent would be too large.
1644 state |= BMAP_LEFT_VALID;
1645 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
1647 if (isnullstartblock(LEFT.br_startblock))
1648 state |= BMAP_LEFT_DELAY;
1651 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1652 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1653 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1654 LEFT.br_state == new->br_state &&
1655 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1656 state |= BMAP_LEFT_CONTIG;
1659 * Check and set flags if this segment has a right neighbor.
1660 * Don't set contiguous if the combined extent would be too large.
1661 * Also check for all-three-contiguous being too large.
1663 if (bma->idx < xfs_iext_count(ifp) - 1) {
1664 state |= BMAP_RIGHT_VALID;
1665 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
1667 if (isnullstartblock(RIGHT.br_startblock))
1668 state |= BMAP_RIGHT_DELAY;
1671 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1672 new_endoff == RIGHT.br_startoff &&
1673 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1674 new->br_state == RIGHT.br_state &&
1675 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1676 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1677 BMAP_RIGHT_FILLING)) !=
1678 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1679 BMAP_RIGHT_FILLING) ||
1680 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1682 state |= BMAP_RIGHT_CONTIG;
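/*
 * The switch below handles each combination of the four FILLING/CONTIG
 * bits computed above (the combinations listed as impossible at the
 * bottom of the switch cannot occur by construction).  For example, all
 * four bits set means the real allocation exactly replaces the delayed
 * extent and merges with both neighbours into a single record, while no
 * bits set is the middle-of-a-delalloc case that splits the old extent
 * into three pieces.
 */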
1686 * Switch out based on the FILLING and CONTIG state bits.
1688 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1689 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1690 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1691 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1693 * Filling in all of a previously delayed allocation extent.
1694 * The left and right neighbors are both contiguous with new.
1697 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1698 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1699 LEFT.br_blockcount + PREV.br_blockcount +
1700 RIGHT.br_blockcount);
1701 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1703 xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
1705 if (bma->cur == NULL)
1706 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1708 rval = XFS_ILOG_CORE;
1709 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1710 RIGHT.br_startblock,
1711 RIGHT.br_blockcount, &i);
1714 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1715 error = xfs_btree_delete(bma->cur, &i);
1718 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1719 error = xfs_btree_decrement(bma->cur, 0, &i);
1722 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1723 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1725 LEFT.br_blockcount +
1726 PREV.br_blockcount +
1727 RIGHT.br_blockcount, LEFT.br_state);
1733 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1735 * Filling in all of a previously delayed allocation extent.
1736 * The left neighbor is contiguous, the right is not.
1740 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1741 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
1742 LEFT.br_blockcount + PREV.br_blockcount);
1743 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1745 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1746 if (bma->cur == NULL)
1747 rval = XFS_ILOG_DEXT;
1750 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1751 LEFT.br_startblock, LEFT.br_blockcount,
1755 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1756 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1758 LEFT.br_blockcount +
1759 PREV.br_blockcount, LEFT.br_state);
1765 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1767 * Filling in all of a previously delayed allocation extent.
1768 * The right neighbor is contiguous, the left is not.
1770 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1771 xfs_bmbt_set_startblock(ep, new->br_startblock);
1772 xfs_bmbt_set_blockcount(ep,
1773 PREV.br_blockcount + RIGHT.br_blockcount);
1774 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1776 xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
1777 if (bma->cur == NULL)
1778 rval = XFS_ILOG_DEXT;
1781 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1782 RIGHT.br_startblock,
1783 RIGHT.br_blockcount, &i);
1786 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1787 error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
1789 PREV.br_blockcount +
1790 RIGHT.br_blockcount, PREV.br_state);
1796 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1798 * Filling in all of a previously delayed allocation extent.
1799 * Neither the left nor right neighbors are contiguous with
1802 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1803 xfs_bmbt_set_startblock(ep, new->br_startblock);
1804 xfs_bmbt_set_state(ep, new->br_state);
1805 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1808 if (bma->cur == NULL)
1809 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1811 rval = XFS_ILOG_CORE;
1812 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1813 new->br_startblock, new->br_blockcount,
1817 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1818 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1819 error = xfs_btree_insert(bma->cur, &i);
1822 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1826 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1828 * Filling in the first part of a previous delayed allocation.
1829 * The left neighbor is contiguous.
1831 trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1832 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
1833 LEFT.br_blockcount + new->br_blockcount);
1834 xfs_bmbt_set_startoff(ep,
1835 PREV.br_startoff + new->br_blockcount);
1836 trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
1838 temp = PREV.br_blockcount - new->br_blockcount;
1839 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1840 xfs_bmbt_set_blockcount(ep, temp);
1841 if (bma->cur == NULL)
1842 rval = XFS_ILOG_DEXT;
1845 error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
1846 LEFT.br_startblock, LEFT.br_blockcount,
1850 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1851 error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
1853 LEFT.br_blockcount +
1859 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1860 startblockval(PREV.br_startblock));
1861 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1862 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1867 case BMAP_LEFT_FILLING:
1869 * Filling in the first part of a previous delayed allocation.
1870 * The left neighbor is not contiguous.
1872 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1873 xfs_bmbt_set_startoff(ep, new_endoff);
1874 temp = PREV.br_blockcount - new->br_blockcount;
1875 xfs_bmbt_set_blockcount(ep, temp);
1876 xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
1878 if (bma->cur == NULL)
1879 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1881 rval = XFS_ILOG_CORE;
1882 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1883 new->br_startblock, new->br_blockcount,
1887 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1888 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1889 error = xfs_btree_insert(bma->cur, &i);
1892 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1895 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1896 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1897 bma->firstblock, bma->dfops,
1898 &bma->cur, 1, &tmp_rval, whichfork);
1903 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1904 startblockval(PREV.br_startblock) -
1905 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1906 ep = xfs_iext_get_ext(ifp, bma->idx + 1);
1907 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1908 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
1911 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1913 * Filling in the last part of a previous delayed allocation.
1914 * The right neighbor is contiguous with the new allocation.
1916 temp = PREV.br_blockcount - new->br_blockcount;
1917 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
1918 xfs_bmbt_set_blockcount(ep, temp);
1919 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
1920 new->br_startoff, new->br_startblock,
1921 new->br_blockcount + RIGHT.br_blockcount,
1923 trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
1924 if (bma->cur == NULL)
1925 rval = XFS_ILOG_DEXT;
1928 error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
1929 RIGHT.br_startblock,
1930 RIGHT.br_blockcount, &i);
1933 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1934 error = xfs_bmbt_update(bma->cur, new->br_startoff,
1936 new->br_blockcount +
1937 RIGHT.br_blockcount,
1943 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1944 startblockval(PREV.br_startblock));
1945 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1946 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1947 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1952 case BMAP_RIGHT_FILLING:
1954 * Filling in the last part of a previous delayed allocation.
1955 * The right neighbor is not contiguous.
1957 temp = PREV.br_blockcount - new->br_blockcount;
1958 trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
1959 xfs_bmbt_set_blockcount(ep, temp);
1960 xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
1962 if (bma->cur == NULL)
1963 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1965 rval = XFS_ILOG_CORE;
1966 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
1967 new->br_startblock, new->br_blockcount,
1971 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
1972 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
1973 error = xfs_btree_insert(bma->cur, &i);
1976 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
1979 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
1980 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
1981 bma->firstblock, bma->dfops, &bma->cur, 1,
1982 &tmp_rval, whichfork);
1987 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
1988 startblockval(PREV.br_startblock) -
1989 (bma->cur ? bma->cur->bc_private.b.allocated : 0));
1990 ep = xfs_iext_get_ext(ifp, bma->idx);
1991 xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
1992 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
1999 * Filling in the middle part of a previous delayed allocation.
2000 * Contiguity is impossible here.
2001 * This case is avoided almost all the time.
2003 * We start with a delayed allocation:
2005 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
2008 * and we are allocating:
2009 * +rrrrrrrrrrrrrrrrr+
2012 * and we set it up for insertion as:
2013 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
2015 * PREV @ idx LEFT RIGHT
2016 * inserted at idx + 1
2018 temp = new->br_startoff - PREV.br_startoff;
2019 temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
2020 trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
2021 xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
2023 RIGHT.br_state = PREV.br_state;
2024 RIGHT.br_startblock = nullstartblock(
2025 (int)xfs_bmap_worst_indlen(bma->ip, temp2));
2026 RIGHT.br_startoff = new_endoff;
2027 RIGHT.br_blockcount = temp2;
2028 /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
2029 xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
2031 if (bma->cur == NULL)
2032 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2034 rval = XFS_ILOG_CORE;
2035 error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
2036 new->br_startblock, new->br_blockcount,
2040 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2041 bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
2042 error = xfs_btree_insert(bma->cur, &i);
2045 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2048 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2049 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2050 bma->firstblock, bma->dfops, &bma->cur,
2051 1, &tmp_rval, whichfork);
2056 temp = xfs_bmap_worst_indlen(bma->ip, temp);
2057 temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
2058 diff = (int)(temp + temp2 -
2059 (startblockval(PREV.br_startblock) -
2061 bma->cur->bc_private.b.allocated : 0)));
2063 error = xfs_mod_fdblocks(bma->ip->i_mount,
2064 -((int64_t)diff), false);
2070 ep = xfs_iext_get_ext(ifp, bma->idx);
2071 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
2072 trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
2073 trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2074 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
2075 nullstartblock((int)temp2));
2076 trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
2079 da_new = temp + temp2;
2082 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2083 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2084 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2085 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2086 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2087 case BMAP_LEFT_CONTIG:
2088 case BMAP_RIGHT_CONTIG:
2090 * These cases are all impossible.
2095 /* add reverse mapping */
2096 error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
2100 /* convert to a btree if necessary */
2101 if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
2102 int tmp_logflags; /* partial log flag return val */
2104 ASSERT(bma->cur == NULL);
2105 error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
2106 bma->firstblock, bma->dfops, &bma->cur,
2107 da_old > 0, &tmp_logflags, whichfork);
2108 bma->logflags |= tmp_logflags;
2113 /* adjust for changes in reserved delayed indirect blocks */
2114 if (da_old || da_new) {
2117 temp += bma->cur->bc_private.b.allocated;
2119 xfs_mod_fdblocks(bma->ip->i_mount,
2120 (int64_t)(da_old - temp), false);
2123 /* clear out the allocated field, done with it now in any case. */
2125 bma->cur->bc_private.b.allocated = 0;
2127 xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
2129 if (whichfork != XFS_COW_FORK)
2130 bma->logflags |= rval;
2138 * Convert an unwritten allocation to a real allocation or vice versa.
2140 STATIC int /* error */
2141 xfs_bmap_add_extent_unwritten_real(
2142 struct xfs_trans *tp,
2143 xfs_inode_t *ip, /* incore inode pointer */
2144 int whichfork,
2145 xfs_extnum_t *idx, /* extent number to update/insert */
2146 xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
2147 xfs_bmbt_irec_t *new, /* new data to add to file extents */
2148 xfs_fsblock_t *first, /* pointer to firstblock variable */
2149 struct xfs_defer_ops *dfops, /* list of extents to be freed */
2150 int *logflagsp) /* inode logging flags */
2152 xfs_btree_cur_t *cur; /* btree cursor */
2153 xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
2154 int error; /* error return value */
2155 int i; /* temp state */
2156 xfs_ifork_t *ifp; /* inode fork pointer */
2157 xfs_fileoff_t new_endoff; /* end offset of new entry */
2158 xfs_exntst_t newext; /* new extent state */
2159 xfs_exntst_t oldext; /* old extent state */
2160 xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
2161 /* left is 0, right is 1, prev is 2 */
2162 int rval=0; /* return value (logging flags) */
2163 int state = 0; /* state bits, accessed thru macros */
2164 struct xfs_mount *mp = ip->i_mount;
2169 ifp = XFS_IFORK_PTR(ip, whichfork);
2170 if (whichfork == XFS_COW_FORK)
2171 state |= BMAP_COWFORK;
2174 ASSERT(*idx <= xfs_iext_count(ifp));
2175 ASSERT(!isnullstartblock(new->br_startblock));
2177 XFS_STATS_INC(mp, xs_add_exlist);
2184 * Set up a bunch of variables to make the tests simpler.
2187 ep = xfs_iext_get_ext(ifp, *idx);
2188 xfs_bmbt_get_all(ep, &PREV);
2189 newext = new->br_state;
2190 oldext = (newext == XFS_EXT_UNWRITTEN) ?
2191 XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
2192 ASSERT(PREV.br_state == oldext);
2193 new_endoff = new->br_startoff + new->br_blockcount;
2194 ASSERT(PREV.br_startoff <= new->br_startoff);
2195 ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
2198 * Set flags determining what part of the previous oldext allocation
2199 * extent is being replaced by a newext allocation.
2201 if (PREV.br_startoff == new->br_startoff)
2202 state |= BMAP_LEFT_FILLING;
2203 if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
2204 state |= BMAP_RIGHT_FILLING;
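/*
* For example, if PREV covers file offsets 100..199 and the new extent
* covers 100..149, only BMAP_LEFT_FILLING is set (just the front of PREV
* changes state); if the new extent covers exactly 100..199, both FILLING
* bits are set and the whole extent is converted in place.
*/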
2207 * Check and set flags if this segment has a left neighbor.
2208 * Don't set contiguous if the combined extent would be too large.
2211 state |= BMAP_LEFT_VALID;
2212 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
2214 if (isnullstartblock(LEFT.br_startblock))
2215 state |= BMAP_LEFT_DELAY;
2218 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2219 LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
2220 LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
2221 LEFT.br_state == newext &&
2222 LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2223 state |= BMAP_LEFT_CONTIG;
2226 * Check and set flags if this segment has a right neighbor.
2227 * Don't set contiguous if the combined extent would be too large.
2228 * Also check for all-three-contiguous being too large.
2230 if (*idx < xfs_iext_count(ifp) - 1) {
2231 state |= BMAP_RIGHT_VALID;
2232 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
2233 if (isnullstartblock(RIGHT.br_startblock))
2234 state |= BMAP_RIGHT_DELAY;
2237 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2238 new_endoff == RIGHT.br_startoff &&
2239 new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
2240 newext == RIGHT.br_state &&
2241 new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
2242 ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2243 BMAP_RIGHT_FILLING)) !=
2244 (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
2245 BMAP_RIGHT_FILLING) ||
2246 LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
2247 <= MAXEXTLEN))
2248 state |= BMAP_RIGHT_CONTIG;
2251 * Switch out based on the FILLING and CONTIG state bits.
2253 switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2254 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
2255 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
2256 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2258 * Setting all of a previous oldext extent to newext.
2259 * The left and right neighbors are both contiguous with new.
2263 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2264 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2265 LEFT.br_blockcount + PREV.br_blockcount +
2266 RIGHT.br_blockcount);
2267 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2269 xfs_iext_remove(ip, *idx + 1, 2, state);
2270 XFS_IFORK_NEXT_SET(ip, whichfork,
2271 XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
2273 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2275 rval = XFS_ILOG_CORE;
2276 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2277 RIGHT.br_startblock,
2278 RIGHT.br_blockcount, &i)))
2280 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2281 if ((error = xfs_btree_delete(cur, &i)))
2283 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2284 if ((error = xfs_btree_decrement(cur, 0, &i)))
2286 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2287 if ((error = xfs_btree_delete(cur, &i)))
2289 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2290 if ((error = xfs_btree_decrement(cur, 0, &i)))
2292 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2293 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2295 LEFT.br_blockcount + PREV.br_blockcount +
2296 RIGHT.br_blockcount, LEFT.br_state)))
2301 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2303 * Setting all of a previous oldext extent to newext.
2304 * The left neighbor is contiguous, the right is not.
2308 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2309 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2310 LEFT.br_blockcount + PREV.br_blockcount);
2311 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2313 xfs_iext_remove(ip, *idx + 1, 1, state);
2314 XFS_IFORK_NEXT_SET(ip, whichfork,
2315 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2317 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2319 rval = XFS_ILOG_CORE;
2320 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2321 PREV.br_startblock, PREV.br_blockcount,
2324 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2325 if ((error = xfs_btree_delete(cur, &i)))
2327 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2328 if ((error = xfs_btree_decrement(cur, 0, &i)))
2330 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2331 if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
2333 LEFT.br_blockcount + PREV.br_blockcount,
2339 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2341 * Setting all of a previous oldext extent to newext.
2342 * The right neighbor is contiguous, the left is not.
2344 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2345 xfs_bmbt_set_blockcount(ep,
2346 PREV.br_blockcount + RIGHT.br_blockcount);
2347 xfs_bmbt_set_state(ep, newext);
2348 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2349 xfs_iext_remove(ip, *idx + 1, 1, state);
2350 XFS_IFORK_NEXT_SET(ip, whichfork,
2351 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2353 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2355 rval = XFS_ILOG_CORE;
2356 if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
2357 RIGHT.br_startblock,
2358 RIGHT.br_blockcount, &i)))
2360 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2361 if ((error = xfs_btree_delete(cur, &i)))
2363 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2364 if ((error = xfs_btree_decrement(cur, 0, &i)))
2366 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2367 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2369 new->br_blockcount + RIGHT.br_blockcount,
2375 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
2377 * Setting all of a previous oldext extent to newext.
2378 * Neither the left nor right neighbors are contiguous with
2379 * the new one.
2381 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2382 xfs_bmbt_set_state(ep, newext);
2383 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2386 rval = XFS_ILOG_DEXT;
2389 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2390 new->br_startblock, new->br_blockcount,
2393 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2394 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2395 new->br_startblock, new->br_blockcount,
2401 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
2403 * Setting the first part of a previous oldext extent to newext.
2404 * The left neighbor is contiguous.
2406 trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
2407 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
2408 LEFT.br_blockcount + new->br_blockcount);
2409 xfs_bmbt_set_startoff(ep,
2410 PREV.br_startoff + new->br_blockcount);
2411 trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
2413 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2414 xfs_bmbt_set_startblock(ep,
2415 new->br_startblock + new->br_blockcount);
2416 xfs_bmbt_set_blockcount(ep,
2417 PREV.br_blockcount - new->br_blockcount);
2418 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2423 rval = XFS_ILOG_DEXT;
2426 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2427 PREV.br_startblock, PREV.br_blockcount,
2430 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2431 if ((error = xfs_bmbt_update(cur,
2432 PREV.br_startoff + new->br_blockcount,
2433 PREV.br_startblock + new->br_blockcount,
2434 PREV.br_blockcount - new->br_blockcount,
2437 if ((error = xfs_btree_decrement(cur, 0, &i)))
2439 error = xfs_bmbt_update(cur, LEFT.br_startoff,
2441 LEFT.br_blockcount + new->br_blockcount,
2448 case BMAP_LEFT_FILLING:
2450 * Setting the first part of a previous oldext extent to newext.
2451 * The left neighbor is not contiguous.
2453 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2454 ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
2455 xfs_bmbt_set_startoff(ep, new_endoff);
2456 xfs_bmbt_set_blockcount(ep,
2457 PREV.br_blockcount - new->br_blockcount);
2458 xfs_bmbt_set_startblock(ep,
2459 new->br_startblock + new->br_blockcount);
2460 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2462 xfs_iext_insert(ip, *idx, 1, new, state);
2463 XFS_IFORK_NEXT_SET(ip, whichfork,
2464 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2466 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2468 rval = XFS_ILOG_CORE;
2469 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2470 PREV.br_startblock, PREV.br_blockcount,
2473 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2474 if ((error = xfs_bmbt_update(cur,
2475 PREV.br_startoff + new->br_blockcount,
2476 PREV.br_startblock + new->br_blockcount,
2477 PREV.br_blockcount - new->br_blockcount,
2480 cur->bc_rec.b = *new;
2481 if ((error = xfs_btree_insert(cur, &i)))
2483 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2487 case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
2489 * Setting the last part of a previous oldext extent to newext.
2490 * The right neighbor is contiguous with the new allocation.
2492 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2493 xfs_bmbt_set_blockcount(ep,
2494 PREV.br_blockcount - new->br_blockcount);
2495 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2499 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2500 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2501 new->br_startoff, new->br_startblock,
2502 new->br_blockcount + RIGHT.br_blockcount, newext);
2503 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2506 rval = XFS_ILOG_DEXT;
2509 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2510 PREV.br_startblock,
2511 PREV.br_blockcount, &i)))
2513 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2514 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2516 PREV.br_blockcount - new->br_blockcount,
2519 if ((error = xfs_btree_increment(cur, 0, &i)))
2521 if ((error = xfs_bmbt_update(cur, new->br_startoff,
2523 new->br_blockcount + RIGHT.br_blockcount,
2529 case BMAP_RIGHT_FILLING:
2531 * Setting the last part of a previous oldext extent to newext.
2532 * The right neighbor is not contiguous.
2534 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2535 xfs_bmbt_set_blockcount(ep,
2536 PREV.br_blockcount - new->br_blockcount);
2537 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2540 xfs_iext_insert(ip, *idx, 1, new, state);
2542 XFS_IFORK_NEXT_SET(ip, whichfork,
2543 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2545 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2547 rval = XFS_ILOG_CORE;
2548 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2549 PREV.br_startblock, PREV.br_blockcount,
2552 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2553 if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
2555 PREV.br_blockcount - new->br_blockcount,
2558 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2559 new->br_startblock, new->br_blockcount,
2562 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2563 cur->bc_rec.b.br_state = XFS_EXT_NORM;
2564 if ((error = xfs_btree_insert(cur, &i)))
2566 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2572 * Setting the middle part of a previous oldext extent to
2573 * newext. Contiguity is impossible here.
2574 * One extent becomes three extents.
2576 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2577 xfs_bmbt_set_blockcount(ep,
2578 new->br_startoff - PREV.br_startoff);
2579 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2582 r[1].br_startoff = new_endoff;
2583 r[1].br_blockcount =
2584 PREV.br_startoff + PREV.br_blockcount - new_endoff;
2585 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2586 r[1].br_state = oldext;
2589 xfs_iext_insert(ip, *idx, 2, &r[0], state);
2591 XFS_IFORK_NEXT_SET(ip, whichfork,
2592 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2594 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2596 rval = XFS_ILOG_CORE;
2597 if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
2598 PREV.br_startblock, PREV.br_blockcount,
2601 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2602 /* new right extent - oldext */
2603 if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
2604 r[1].br_startblock, r[1].br_blockcount,
2607 /* new left extent - oldext */
2608 cur->bc_rec.b = PREV;
2609 cur->bc_rec.b.br_blockcount =
2610 new->br_startoff - PREV.br_startoff;
2611 if ((error = xfs_btree_insert(cur, &i)))
2613 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2615 * Reset the cursor to the position of the new extent
2616 * we are about to insert as we can't trust it after
2617 * the previous insert.
2619 if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
2620 new->br_startblock, new->br_blockcount,
2623 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2624 /* new middle extent - newext */
2625 cur->bc_rec.b.br_state = new->br_state;
2626 if ((error = xfs_btree_insert(cur, &i)))
2628 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2632 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2633 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2634 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2635 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2636 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2637 case BMAP_LEFT_CONTIG:
2638 case BMAP_RIGHT_CONTIG:
2640 * These cases are all impossible.
2645 /* update reverse mappings */
2646 error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
2650 /* convert to a btree if necessary */
2651 if (xfs_bmap_needs_btree(ip, whichfork)) {
2652 int tmp_logflags; /* partial log flag return val */
2654 ASSERT(cur == NULL);
2655 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
2656 0, &tmp_logflags, whichfork);
2657 *logflagsp |= tmp_logflags;
2662 /* clear out the allocated field, done with it now in any case. */
2664 cur->bc_private.b.allocated = 0;
2668 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2678 * Convert a hole to a delayed allocation.
2681 xfs_bmap_add_extent_hole_delay(
2682 xfs_inode_t *ip, /* incore inode pointer */
2683 int whichfork,
2684 xfs_extnum_t *idx, /* extent number to update/insert */
2685 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2687 xfs_ifork_t *ifp; /* inode fork pointer */
2688 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2689 xfs_filblks_t newlen=0; /* new indirect size */
2690 xfs_filblks_t oldlen=0; /* old indirect size */
2691 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2692 int state; /* state bits, accessed thru macros */
2693 xfs_filblks_t temp=0; /* temp for indirect calculations */
2695 ifp = XFS_IFORK_PTR(ip, whichfork);
2696 state = 0;
2697 if (whichfork == XFS_COW_FORK)
2698 state |= BMAP_COWFORK;
2699 ASSERT(isnullstartblock(new->br_startblock));
2702 * Check and set flags if this segment has a left neighbor
2705 state |= BMAP_LEFT_VALID;
2706 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2708 if (isnullstartblock(left.br_startblock))
2709 state |= BMAP_LEFT_DELAY;
2713 * Check and set flags if the current (right) segment exists.
2714 * If it doesn't exist, we're converting the hole at end-of-file.
2716 if (*idx < xfs_iext_count(ifp)) {
2717 state |= BMAP_RIGHT_VALID;
2718 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2720 if (isnullstartblock(right.br_startblock))
2721 state |= BMAP_RIGHT_DELAY;
2725 * Set contiguity flags on the left and right neighbors.
2726 * Don't let extents get too large, even if the pieces are contiguous.
2728 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2729 left.br_startoff + left.br_blockcount == new->br_startoff &&
2730 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2731 state |= BMAP_LEFT_CONTIG;
2733 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2734 new->br_startoff + new->br_blockcount == right.br_startoff &&
2735 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2736 (!(state & BMAP_LEFT_CONTIG) ||
2737 (left.br_blockcount + new->br_blockcount +
2738 right.br_blockcount <= MAXEXTLEN)))
2739 state |= BMAP_RIGHT_CONTIG;
2742 * Switch out based on the contiguity flags.
2744 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2745 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2747 * New allocation is contiguous with delayed allocations
2748 * on the left and on the right.
2749 * Merge all three into a single extent record.
2752 temp = left.br_blockcount + new->br_blockcount +
2753 right.br_blockcount;
2755 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2756 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2757 oldlen = startblockval(left.br_startblock) +
2758 startblockval(new->br_startblock) +
2759 startblockval(right.br_startblock);
2760 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2762 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2763 nullstartblock((int)newlen));
2764 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2766 xfs_iext_remove(ip, *idx + 1, 1, state);
2769 case BMAP_LEFT_CONTIG:
2771 * New allocation is contiguous with a delayed allocation
2772 * on the left.
2773 * Merge the new allocation with the left neighbor.
2776 temp = left.br_blockcount + new->br_blockcount;
2778 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2779 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
2780 oldlen = startblockval(left.br_startblock) +
2781 startblockval(new->br_startblock);
2782 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2784 xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
2785 nullstartblock((int)newlen));
2786 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2789 case BMAP_RIGHT_CONTIG:
2791 * New allocation is contiguous with a delayed allocation
2792 * on the right.
2793 * Merge the new allocation with the right neighbor.
2795 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2796 temp = new->br_blockcount + right.br_blockcount;
2797 oldlen = startblockval(new->br_startblock) +
2798 startblockval(right.br_startblock);
2799 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2801 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2803 nullstartblock((int)newlen), temp, right.br_state);
2804 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2809 * New allocation is not contiguous with another
2810 * delayed allocation.
2811 * Insert a new entry.
2813 oldlen = newlen = 0;
2814 xfs_iext_insert(ip, *idx, 1, new, state);
2817 if (oldlen != newlen) {
2818 ASSERT(oldlen > newlen);
2819 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2822 /* Nothing to do for disk quota accounting here. */
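/*
* Merging never grows the total worst-case indirect reservation:
* e.g. (illustrative numbers) two neighbouring delalloc extents that each
* reserved 3 indirect blocks may need only 4 once combined, so the
* surplus 2 blocks are returned to the free block counter above.
*/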
2828 * Convert a hole to a real allocation.
2830 STATIC int /* error */
2831 xfs_bmap_add_extent_hole_real(
2832 struct xfs_trans *tp,
2833 struct xfs_inode *ip,
2834 int whichfork,
2835 xfs_extnum_t *idx,
2836 struct xfs_btree_cur **curp,
2837 struct xfs_bmbt_irec *new,
2838 xfs_fsblock_t *first,
2839 struct xfs_defer_ops *dfops,
2840 int *logflagsp)
2842 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2843 struct xfs_mount *mp = ip->i_mount;
2844 struct xfs_btree_cur *cur = *curp;
2845 int error; /* error return value */
2846 int i; /* temp state */
2847 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2848 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2849 int rval=0; /* return value (logging flags) */
2850 int state; /* state bits, accessed thru macros */
2853 ASSERT(*idx <= xfs_iext_count(ifp));
2854 ASSERT(!isnullstartblock(new->br_startblock));
2855 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2857 XFS_STATS_INC(mp, xs_add_exlist);
2859 state = 0;
2860 if (whichfork == XFS_ATTR_FORK)
2861 state |= BMAP_ATTRFORK;
2862 if (whichfork == XFS_COW_FORK)
2863 state |= BMAP_COWFORK;
2866 * Check and set flags if this segment has a left neighbor.
2869 state |= BMAP_LEFT_VALID;
2870 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
2871 if (isnullstartblock(left.br_startblock))
2872 state |= BMAP_LEFT_DELAY;
2876 * Check and set flags if this segment has a current value.
2877 * Not true if we're inserting into the "hole" at eof.
2879 if (*idx < xfs_iext_count(ifp)) {
2880 state |= BMAP_RIGHT_VALID;
2881 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
2882 if (isnullstartblock(right.br_startblock))
2883 state |= BMAP_RIGHT_DELAY;
2887 * We're inserting a real allocation between "left" and "right".
2888 * Set the contiguity flags. Don't let extents get too large.
2890 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2891 left.br_startoff + left.br_blockcount == new->br_startoff &&
2892 left.br_startblock + left.br_blockcount == new->br_startblock &&
2893 left.br_state == new->br_state &&
2894 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2895 state |= BMAP_LEFT_CONTIG;
2897 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2898 new->br_startoff + new->br_blockcount == right.br_startoff &&
2899 new->br_startblock + new->br_blockcount == right.br_startblock &&
2900 new->br_state == right.br_state &&
2901 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2902 (!(state & BMAP_LEFT_CONTIG) ||
2903 left.br_blockcount + new->br_blockcount +
2904 right.br_blockcount <= MAXEXTLEN))
2905 state |= BMAP_RIGHT_CONTIG;
2909 * Select which case we're in here, and implement it.
2911 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2912 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2914 * New allocation is contiguous with real allocations on the
2915 * left and on the right.
2916 * Merge all three into a single extent record.
2919 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2920 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2921 left.br_blockcount + new->br_blockcount +
2922 right.br_blockcount);
2923 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2925 xfs_iext_remove(ip, *idx + 1, 1, state);
2927 XFS_IFORK_NEXT_SET(ip, whichfork,
2928 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2930 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2932 rval = XFS_ILOG_CORE;
2933 error = xfs_bmbt_lookup_eq(cur, right.br_startoff,
2934 right.br_startblock, right.br_blockcount,
2938 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2939 error = xfs_btree_delete(cur, &i);
2942 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2943 error = xfs_btree_decrement(cur, 0, &i);
2946 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2947 error = xfs_bmbt_update(cur, left.br_startoff,
2949 left.br_blockcount +
2950 new->br_blockcount +
2951 right.br_blockcount,
2958 case BMAP_LEFT_CONTIG:
2960 * New allocation is contiguous with a real allocation
2961 * on the left.
2962 * Merge the new allocation with the left neighbor.
2965 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2966 xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
2967 left.br_blockcount + new->br_blockcount);
2968 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
2971 rval = xfs_ilog_fext(whichfork);
2974 error = xfs_bmbt_lookup_eq(cur, left.br_startoff,
2975 left.br_startblock, left.br_blockcount,
2979 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2980 error = xfs_bmbt_update(cur, left.br_startoff,
2982 left.br_blockcount +
2990 case BMAP_RIGHT_CONTIG:
2992 * New allocation is contiguous with a real allocation
2993 * on the right.
2994 * Merge the new allocation with the right neighbor.
2996 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
2997 xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
2998 new->br_startoff, new->br_startblock,
2999 new->br_blockcount + right.br_blockcount,
3001 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
3004 rval = xfs_ilog_fext(whichfork);
3007 error = xfs_bmbt_lookup_eq(cur,
3009 right.br_startblock,
3010 right.br_blockcount, &i);
3013 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3014 error = xfs_bmbt_update(cur, new->br_startoff,
3016 new->br_blockcount +
3017 right.br_blockcount,
3026 * New allocation is not contiguous with another
3027 * real allocation.
3028 * Insert a new entry.
3030 xfs_iext_insert(ip, *idx, 1, new, state);
3031 XFS_IFORK_NEXT_SET(ip, whichfork,
3032 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
3034 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
3036 rval = XFS_ILOG_CORE;
3037 error = xfs_bmbt_lookup_eq(cur,
3040 new->br_blockcount, &i);
3043 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
3044 cur->bc_rec.b.br_state = new->br_state;
3045 error = xfs_btree_insert(cur, &i);
3048 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
3053 /* add reverse mapping */
3054 error = xfs_rmap_map_extent(mp, dfops, ip, whichfork, new);
3058 /* convert to a btree if necessary */
3059 if (xfs_bmap_needs_btree(ip, whichfork)) {
3060 int tmp_logflags; /* partial log flag return val */
3062 ASSERT(cur == NULL);
3063 error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, curp,
3064 0, &tmp_logflags, whichfork);
3065 *logflagsp |= tmp_logflags;
3071 /* clear out the allocated field, done with it now in any case. */
3073 cur->bc_private.b.allocated = 0;
3075 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
3082 * Functions used in the extent read, allocate and remove paths
3086 * Adjust the size of the new extent based on di_extsize and rt extsize.
3089 xfs_bmap_extsize_align(
3091 xfs_bmbt_irec_t *gotp, /* next extent pointer */
3092 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
3093 xfs_extlen_t extsz, /* align to this extent size */
3094 int rt, /* is this a realtime inode? */
3095 int eof, /* is extent at end-of-file? */
3096 int delay, /* creating delalloc extent? */
3097 int convert, /* overwriting unwritten extent? */
3098 xfs_fileoff_t *offp, /* in/out: aligned offset */
3099 xfs_extlen_t *lenp) /* in/out: aligned length */
3101 xfs_fileoff_t orig_off; /* original offset */
3102 xfs_extlen_t orig_alen; /* original length */
3103 xfs_fileoff_t orig_end; /* original off+len */
3104 xfs_fileoff_t nexto; /* next file offset */
3105 xfs_fileoff_t prevo; /* previous file offset */
3106 xfs_fileoff_t align_off; /* temp for offset */
3107 xfs_extlen_t align_alen; /* temp for length */
3108 xfs_extlen_t temp; /* temp for calculations */
3113 orig_off = align_off = *offp;
3114 orig_alen = align_alen = *lenp;
3115 orig_end = orig_off + orig_alen;
3118 * If this request overlaps an existing extent, then don't
3119 * attempt to perform any additional alignment.
3121 if (!delay && !eof &&
3122 (orig_off >= gotp->br_startoff) &&
3123 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
3128 * If the file offset is unaligned vs. the extent size
3129 * we need to align it. This will be possible unless
3130 * the file was previously written with a kernel that didn't
3131 * perform this alignment, or if a truncate shot us in the
3132 * foot.
3134 temp = do_mod(orig_off, extsz);
3140 /* Same adjustment for the end of the requested area. */
3141 temp = (align_alen % extsz);
3143 align_alen += extsz - temp;
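/*
* For example, with an extent size hint of 16 blocks and a request for
* 10 blocks at file offset 100 (illustrative values), the start is
* pulled back to offset 96 and the length grown to 16 so that both ends
* of the proposed allocation fall on extent size boundaries.
*/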
3146 * For large extent hint sizes, the aligned extent might be larger than
3147 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
3148 * the length back under MAXEXTLEN. The outer allocation loops handle
3149 * short allocation just fine, so it is safe to do this. We only want to
3150 * do it when we are forced to, though, because it means more allocation
3151 * operations are required.
3153 while (align_alen > MAXEXTLEN)
3154 align_alen -= extsz;
3155 ASSERT(align_alen <= MAXEXTLEN);
3158 * If the previous block overlaps with this proposed allocation
3159 * then move the start forward without adjusting the length.
3161 if (prevp->br_startoff != NULLFILEOFF) {
3162 if (prevp->br_startblock == HOLESTARTBLOCK)
3163 prevo = prevp->br_startoff;
3165 prevo = prevp->br_startoff + prevp->br_blockcount;
3168 if (align_off != orig_off && align_off < prevo)
3171 * If the next block overlaps with this proposed allocation
3172 * then move the start back without adjusting the length,
3173 * but not before offset 0.
3174 * This may of course make the start overlap previous block,
3175 * and if we hit the offset 0 limit then the next block
3176 * can still overlap too.
3178 if (!eof && gotp->br_startoff != NULLFILEOFF) {
3179 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
3180 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
3181 nexto = gotp->br_startoff + gotp->br_blockcount;
3183 nexto = gotp->br_startoff;
3185 nexto = NULLFILEOFF;
3187 align_off + align_alen != orig_end &&
3188 align_off + align_alen > nexto)
3189 align_off = nexto > align_alen ? nexto - align_alen : 0;
3191 * If we're now overlapping the next or previous extent that
3192 * means we can't fit an extsz piece in this hole. Just move
3193 * the start forward to the first valid spot and set
3194 * the length so we hit the end.
3196 if (align_off != orig_off && align_off < prevo)
3198 if (align_off + align_alen != orig_end &&
3199 align_off + align_alen > nexto &&
3200 nexto != NULLFILEOFF) {
3201 ASSERT(nexto > prevo);
3202 align_alen = nexto - align_off;
3206 * If realtime, and the result isn't a multiple of the realtime
3207 * extent size we need to remove blocks until it is.
3209 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
3211 * We're not covering the original request, or
3212 * we won't be able to once we fix the length.
3214 if (orig_off < align_off ||
3215 orig_end > align_off + align_alen ||
3216 align_alen - temp < orig_alen)
3219 * Try to fix it by moving the start up.
3221 if (align_off + temp <= orig_off) {
3226 * Try to fix it by moving the end in.
3228 else if (align_off + align_alen - temp >= orig_end)
3231 * Set the start to the minimum then trim the length.
3234 align_alen -= orig_off - align_off;
3235 align_off = orig_off;
3236 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3239 * Result doesn't cover the request, fail it.
3241 if (orig_off < align_off || orig_end > align_off + align_alen)
3244 ASSERT(orig_off >= align_off);
3245 /* see MAXEXTLEN handling above */
3246 ASSERT(orig_end <= align_off + align_alen ||
3247 align_alen + extsz > MAXEXTLEN);
3251 if (!eof && gotp->br_startoff != NULLFILEOFF)
3252 ASSERT(align_off + align_alen <= gotp->br_startoff);
3253 if (prevp->br_startoff != NULLFILEOFF)
3254 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3262 #define XFS_ALLOC_GAP_UNITS 4
3266 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3268 xfs_fsblock_t adjust; /* adjustment to block numbers */
3269 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3270 xfs_mount_t *mp; /* mount point structure */
3271 int nullfb; /* true if ap->firstblock isn't set */
3272 int rt; /* true if inode is realtime */
3274 #define ISVALID(x,y) \
3275 (rt ? \
3276 (x) < mp->m_sb.sb_rblocks : \
3277 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3278 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3279 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3281 mp = ap->ip->i_mount;
3282 nullfb = *ap->firstblock == NULLFSBLOCK;
3283 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3284 xfs_alloc_is_userdata(ap->datatype);
3285 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3287 * If allocating at eof, and there's a previous real block,
3288 * try to use its last block as our starting point.
3290 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3291 !isnullstartblock(ap->prev.br_startblock) &&
3292 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3293 ap->prev.br_startblock)) {
3294 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3296 * Adjust for the gap between prevp and us.
3298 adjust = ap->offset -
3299 (ap->prev.br_startoff + ap->prev.br_blockcount);
3301 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3302 ap->blkno += adjust;
3305 * If not at eof, then compare the two neighbor blocks.
3306 * Figure out whether either one gives us a good starting point,
3307 * and pick the better one.
3309 else if (!ap->eof) {
3310 xfs_fsblock_t gotbno; /* right side block number */
3311 xfs_fsblock_t gotdiff=0; /* right side difference */
3312 xfs_fsblock_t prevbno; /* left side block number */
3313 xfs_fsblock_t prevdiff=0; /* left side difference */
3316 * If there's a previous (left) block, select a requested
3317 * start block based on it.
3319 if (ap->prev.br_startoff != NULLFILEOFF &&
3320 !isnullstartblock(ap->prev.br_startblock) &&
3321 (prevbno = ap->prev.br_startblock +
3322 ap->prev.br_blockcount) &&
3323 ISVALID(prevbno, ap->prev.br_startblock)) {
3325 * Calculate gap to end of previous block.
3327 adjust = prevdiff = ap->offset -
3328 (ap->prev.br_startoff +
3329 ap->prev.br_blockcount);
3331 * Figure the startblock based on the previous block's
3332 * end and the gap size.
3334 * If the gap is large relative to the piece we're
3335 * allocating, or using it gives us an invalid block
3336 * number, then just use the end of the previous block.
3338 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3339 ISVALID(prevbno + prevdiff,
3340 ap->prev.br_startblock))
3345 * If the firstblock forbids it, can't use it,
3346 * otherwise must use it if we've used firstblock.
3348 if (!rt && !nullfb &&
3349 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3350 prevbno = NULLFSBLOCK;
3353 * No previous block or can't follow it, just default.
3356 prevbno = NULLFSBLOCK;
3358 * If there's a following (right) block, select a requested
3359 * start block based on it.
3361 if (!isnullstartblock(ap->got.br_startblock)) {
3363 * Calculate gap to start of next block.
3365 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3367 * Figure the startblock based on the next block's
3368 * start and the gap size.
3370 gotbno = ap->got.br_startblock;
3373 * If the gap is large relative to the piece we're
3374 * allocating, or using it gives us an invalid block
3375 * number, then just use the start of the next block
3376 * offset by our length.
3378 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3379 ISVALID(gotbno - gotdiff, gotbno))
3381 else if (ISVALID(gotbno - ap->length, gotbno)) {
3382 gotbno -= ap->length;
3383 gotdiff += adjust - ap->length;
3387 * If the firstblock forbids it, can't use it,
3388 * otherwise must use it if we've used firstblock.
3390 if (!rt && !nullfb &&
3391 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3392 gotbno = NULLFSBLOCK;
3395 * No next block, just default.
3398 gotbno = NULLFSBLOCK;
3400 * If both valid, pick the better one, else the only good
3401 * one, else ap->blkno is already set (to 0 or the inode block).
3403 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3404 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3405 else if (prevbno != NULLFSBLOCK)
3406 ap->blkno = prevbno;
3407 else if (gotbno != NULLFSBLOCK)
3408 ap->blkno = gotbno;
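/*
* In other words, with XFS_ALLOC_GAP_UNITS == 4, a gap of up to four
* times the requested length between the neighbouring extent and the
* region being allocated is still considered worth projecting the start
* block across; a larger gap simply falls back to the block right next
* to that neighbouring extent.
*/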
3414 xfs_bmap_longest_free_extent(
3415 struct xfs_trans *tp,
3420 struct xfs_mount *mp = tp->t_mountp;
3421 struct xfs_perag *pag;
3422 xfs_extlen_t longest;
3425 pag = xfs_perag_get(mp, ag);
3426 if (!pag->pagf_init) {
3427 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3431 if (!pag->pagf_init) {
3437 longest = xfs_alloc_longest_free_extent(mp, pag,
3438 xfs_alloc_min_freelist(mp, pag),
3439 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3440 if (*blen < longest)
3449 xfs_bmap_select_minlen(
3450 struct xfs_bmalloca *ap,
3451 struct xfs_alloc_arg *args,
3455 if (notinit || *blen < ap->minlen) {
3457 * Since we did a BUF_TRYLOCK above, it is possible that
3458 * there is space for this request.
3460 args->minlen = ap->minlen;
3461 } else if (*blen < args->maxlen) {
3463 * If the best seen length is less than the request length,
3464 * use the best as the minimum.
3466 args->minlen = *blen;
3469 * Otherwise we've seen an extent as big as maxlen, use that
3472 args->minlen = args->maxlen;
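/*
* For example, if the longest free extent seen while scanning was 40
* blocks and the caller asked for up to 100 (maxlen) with a minimum of 1,
* minlen is raised to 40: a 40-block allocation is already known to be
* possible, so there is no reason to accept less.
*/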
3477 xfs_bmap_btalloc_nullfb(
3478 struct xfs_bmalloca *ap,
3479 struct xfs_alloc_arg *args,
3482 struct xfs_mount *mp = ap->ip->i_mount;
3483 xfs_agnumber_t ag, startag;
3487 args->type = XFS_ALLOCTYPE_START_BNO;
3488 args->total = ap->total;
3490 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3491 if (startag == NULLAGNUMBER)
3494 while (*blen < args->maxlen) {
3495 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3496 &notinit);
3500 if (++ag == mp->m_sb.sb_agcount)
3506 xfs_bmap_select_minlen(ap, args, blen, notinit);
3511 xfs_bmap_btalloc_filestreams(
3512 struct xfs_bmalloca *ap,
3513 struct xfs_alloc_arg *args,
3516 struct xfs_mount *mp = ap->ip->i_mount;
3521 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3522 args->total = ap->total;
3524 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3525 if (ag == NULLAGNUMBER)
3528 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, &notinit);
3532 if (*blen < args->maxlen) {
3533 error = xfs_filestream_new_ag(ap, &ag);
3537 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3538 &notinit);
3544 xfs_bmap_select_minlen(ap, args, blen, notinit);
3547 * Set the failure fallback case to look in the selected AG as stream
3550 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3556 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3558 xfs_mount_t *mp; /* mount point structure */
3559 xfs_alloctype_t atype = 0; /* type for allocation routines */
3560 xfs_extlen_t align = 0; /* minimum allocation alignment */
3561 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3563 xfs_alloc_arg_t args;
3565 xfs_extlen_t nextminlen = 0;
3566 int nullfb; /* true if ap->firstblock isn't set */
3574 mp = ap->ip->i_mount;
3576 /* stripe alignment for allocation is determined by mount parameters */
3578 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3579 stripe_align = mp->m_swidth;
3580 else if (mp->m_dalign)
3581 stripe_align = mp->m_dalign;
3583 if (ap->flags & XFS_BMAPI_COWFORK)
3584 align = xfs_get_cowextsz_hint(ap->ip);
3585 else if (xfs_alloc_is_userdata(ap->datatype))
3586 align = xfs_get_extsz_hint(ap->ip);
3588 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3589 align, 0, ap->eof, 0, ap->conv,
3590 &ap->offset, &ap->length);
3596 nullfb = *ap->firstblock == NULLFSBLOCK;
3597 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
3599 if (xfs_alloc_is_userdata(ap->datatype) &&
3600 xfs_inode_is_filestream(ap->ip)) {
3601 ag = xfs_filestream_lookup_ag(ap->ip);
3602 ag = (ag != NULLAGNUMBER) ? ag : 0;
3603 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3605 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3608 ap->blkno = *ap->firstblock;
3610 xfs_bmap_adjacent(ap);
3613 * If allowed, use ap->blkno; otherwise must use firstblock since
3614 * it's in the right allocation group.
3616 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3619 ap->blkno = *ap->firstblock;
3621 * Normal allocation, done through xfs_alloc_vextent.
3623 tryagain = isaligned = 0;
3624 memset(&args, 0, sizeof(args));
3627 args.fsbno = ap->blkno;
3628 xfs_rmap_skip_owner_update(&args.oinfo);
3630 /* Trim the allocation back to the maximum an AG can fit. */
3631 args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
3632 args.firstblock = *ap->firstblock;
3636 * Search for an allocation group with a single extent large
3637 * enough for the request. If one isn't found, then adjust
3638 * the minimum allocation size to the largest space found.
3640 if (xfs_alloc_is_userdata(ap->datatype) &&
3641 xfs_inode_is_filestream(ap->ip))
3642 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3644 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3647 } else if (ap->dfops->dop_low) {
3648 if (xfs_inode_is_filestream(ap->ip))
3649 args.type = XFS_ALLOCTYPE_FIRST_AG;
3651 args.type = XFS_ALLOCTYPE_START_BNO;
3652 args.total = args.minlen = ap->minlen;
3654 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3655 args.total = ap->total;
3656 args.minlen = ap->minlen;
3658 /* apply extent size hints if obtained earlier */
3661 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
3662 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3663 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3667 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3668 if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod))))
3669 args.mod = (xfs_extlen_t)(args.prod - args.mod);
3672 * If we are not low on available data blocks, and the
3673 * underlying logical volume manager is a stripe, and
3674 * the file offset is zero then try to allocate data
3675 * blocks on stripe unit boundary.
3676 * NOTE: ap->aeof is only set if the allocation length
3677 * is >= the stripe unit and the allocation offset is
3678 * at the end of file.
3680 if (!ap->dfops->dop_low && ap->aeof) {
3682 args.alignment = stripe_align;
3686 * Adjust for alignment
3688 if (blen > args.alignment && blen <= args.maxlen)
3689 args.minlen = blen - args.alignment;
3690 args.minalignslop = 0;
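/*
* e.g. with a stripe alignment of 8 blocks and a best-seen free extent
* of 20 blocks (illustrative values), minlen drops to 12 so the aligned
* allocation can still succeed even if up to 7 blocks at the front of
* that free extent are skipped to reach a stripe boundary.
*/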
3693 * First try an exact bno allocation.
3694 * If it fails then do a near or start bno
3695 * allocation with alignment turned on.
3699 args.type = XFS_ALLOCTYPE_THIS_BNO;
3702 * Compute the minlen+alignment for the
3703 * next case. Set slop so that the value
3704 * of minlen+alignment+slop doesn't go up
3705 * between the calls.
3707 if (blen > stripe_align && blen <= args.maxlen)
3708 nextminlen = blen - stripe_align;
3710 nextminlen = args.minlen;
3711 if (nextminlen + stripe_align > args.minlen + 1)
3713 nextminlen + stripe_align -
3716 args.minalignslop = 0;
3720 args.minalignslop = 0;
3722 args.minleft = ap->minleft;
3723 args.wasdel = ap->wasdel;
3724 args.resv = XFS_AG_RESV_NONE;
3725 args.datatype = ap->datatype;
3726 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3729 error = xfs_alloc_vextent(&args);
3733 if (tryagain && args.fsbno == NULLFSBLOCK) {
3735 * Exact allocation failed. Now try with alignment
3736 * turned on.
3739 args.fsbno = ap->blkno;
3740 args.alignment = stripe_align;
3741 args.minlen = nextminlen;
3742 args.minalignslop = 0;
3744 if ((error = xfs_alloc_vextent(&args)))
3747 if (isaligned && args.fsbno == NULLFSBLOCK) {
3749 * allocation failed, so turn off alignment and
3750 * try again.
3753 args.fsbno = ap->blkno;
3755 if ((error = xfs_alloc_vextent(&args)))
3758 if (args.fsbno == NULLFSBLOCK && nullfb &&
3759 args.minlen > ap->minlen) {
3760 args.minlen = ap->minlen;
3761 args.type = XFS_ALLOCTYPE_START_BNO;
3762 args.fsbno = ap->blkno;
3763 if ((error = xfs_alloc_vextent(&args)))
3766 if (args.fsbno == NULLFSBLOCK && nullfb) {
3768 args.type = XFS_ALLOCTYPE_FIRST_AG;
3769 args.total = ap->minlen;
3770 if ((error = xfs_alloc_vextent(&args)))
3772 ap->dfops->dop_low = true;
3774 if (args.fsbno != NULLFSBLOCK) {
3776 * check the allocation happened at the same or higher AG than
3777 * the first block that was allocated.
3779 ASSERT(*ap->firstblock == NULLFSBLOCK ||
3780 XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
3781 XFS_FSB_TO_AGNO(mp, args.fsbno));
3783 ap->blkno = args.fsbno;
3784 if (*ap->firstblock == NULLFSBLOCK)
3785 *ap->firstblock = args.fsbno;
3786 ASSERT(nullfb || fb_agno <= args.agno);
3787 ap->length = args.len;
3788 if (!(ap->flags & XFS_BMAPI_COWFORK))
3789 ap->ip->i_d.di_nblocks += args.len;
3790 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3792 ap->ip->i_delayed_blks -= args.len;
3794 * Adjust the disk quota also. This was reserved
3795 * earlier.
3797 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3798 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT :
3799 XFS_TRANS_DQ_BCOUNT,
3802 ap->blkno = NULLFSBLOCK;
3809 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3810 * It figures out where to ask the underlying allocator to put the new extent.
3814 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3816 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3817 xfs_alloc_is_userdata(ap->datatype))
3818 return xfs_bmap_rtalloc(ap);
3819 return xfs_bmap_btalloc(ap);
3822 /* Trim extent to fit a logical block range. */
3825 struct xfs_bmbt_irec *irec,
3829 xfs_fileoff_t distance;
3830 xfs_fileoff_t end = bno + len;
3832 if (irec->br_startoff + irec->br_blockcount <= bno ||
3833 irec->br_startoff >= end) {
3834 irec->br_blockcount = 0;
3838 if (irec->br_startoff < bno) {
3839 distance = bno - irec->br_startoff;
3840 if (isnullstartblock(irec->br_startblock))
3841 irec->br_startblock = DELAYSTARTBLOCK;
3842 if (irec->br_startblock != DELAYSTARTBLOCK &&
3843 irec->br_startblock != HOLESTARTBLOCK)
3844 irec->br_startblock += distance;
3845 irec->br_startoff += distance;
3846 irec->br_blockcount -= distance;
3849 if (end < irec->br_startoff + irec->br_blockcount) {
3850 distance = irec->br_startoff + irec->br_blockcount - end;
3851 irec->br_blockcount -= distance;
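/*
* For example (illustrative values): an irec covering file offsets
* 10..29 trimmed to bno = 15, len = 10 comes back as startoff 15 and
* blockcount 10, with br_startblock advanced by 5 blocks unless it is
* the HOLESTARTBLOCK or DELAYSTARTBLOCK sentinel.
*/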
3856 * Trim the returned map to the required bounds
3860 struct xfs_bmbt_irec *mval,
3861 struct xfs_bmbt_irec *got,
3869 if ((flags & XFS_BMAPI_ENTIRE) ||
3870 got->br_startoff + got->br_blockcount <= obno) {
3872 if (isnullstartblock(got->br_startblock))
3873 mval->br_startblock = DELAYSTARTBLOCK;
3879 ASSERT((*bno >= obno) || (n == 0));
3881 mval->br_startoff = *bno;
3882 if (isnullstartblock(got->br_startblock))
3883 mval->br_startblock = DELAYSTARTBLOCK;
3885 mval->br_startblock = got->br_startblock +
3886 (*bno - got->br_startoff);
3888 * Return the minimum of what we got and what we asked for for
3889 * the length. We can use the len variable here because it is
3890 * modified below and we could have been there before coming
3891 * here if the first part of the allocation didn't overlap what
3892 * was asked for.
3894 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3895 got->br_blockcount - (*bno - got->br_startoff));
3896 mval->br_state = got->br_state;
3897 ASSERT(mval->br_blockcount <= len);
3902 * Update and validate the extent map to return
3905 xfs_bmapi_update_map(
3906 struct xfs_bmbt_irec **map,
3914 xfs_bmbt_irec_t *mval = *map;
3916 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3917 ((mval->br_startoff + mval->br_blockcount) <= end));
3918 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3919 (mval->br_startoff < obno));
3921 *bno = mval->br_startoff + mval->br_blockcount;
3923 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3924 /* update previous map with new information */
3925 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3926 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3927 ASSERT(mval->br_state == mval[-1].br_state);
3928 mval[-1].br_blockcount = mval->br_blockcount;
3929 mval[-1].br_state = mval->br_state;
3930 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3931 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3932 mval[-1].br_startblock != HOLESTARTBLOCK &&
3933 mval->br_startblock == mval[-1].br_startblock +
3934 mval[-1].br_blockcount &&
3935 ((flags & XFS_BMAPI_IGSTATE) ||
3936 mval[-1].br_state == mval->br_state)) {
3937 ASSERT(mval->br_startoff ==
3938 mval[-1].br_startoff + mval[-1].br_blockcount);
3939 mval[-1].br_blockcount += mval->br_blockcount;
3940 } else if (*n > 0 &&
3941 mval->br_startblock == DELAYSTARTBLOCK &&
3942 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3943 mval->br_startoff ==
3944 mval[-1].br_startoff + mval[-1].br_blockcount) {
3945 mval[-1].br_blockcount += mval->br_blockcount;
3946 mval[-1].br_state = mval->br_state;
3947 } else if (!((*n == 0) &&
3948 ((mval->br_startoff + mval->br_blockcount) <=
3957 * Map file blocks to filesystem blocks without allocation.
3959 int
3960 xfs_bmapi_read(
3961 struct xfs_inode *ip,
3962 xfs_fileoff_t bno,
3963 xfs_filblks_t len,
3964 struct xfs_bmbt_irec *mval,
3965 int *nmap,
3966 int flags)
3968 struct xfs_mount *mp = ip->i_mount;
3969 struct xfs_ifork *ifp;
3970 struct xfs_bmbt_irec got;
3977 int whichfork = xfs_bmapi_whichfork(flags);
3980 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3981 XFS_BMAPI_IGSTATE|XFS_BMAPI_COWFORK)));
3982 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3984 if (unlikely(XFS_TEST_ERROR(
3985 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3986 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
3987 mp, XFS_ERRTAG_BMAPIFORMAT))) {
3988 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
3989 return -EFSCORRUPTED;
3992 if (XFS_FORCED_SHUTDOWN(mp))
3995 XFS_STATS_INC(mp, xs_blk_mapr);
3997 ifp = XFS_IFORK_PTR(ip, whichfork);
3999 /* No CoW fork? Return a hole. */
4000 if (whichfork == XFS_COW_FORK && !ifp) {
4001 mval->br_startoff = bno;
4002 mval->br_startblock = HOLESTARTBLOCK;
4003 mval->br_blockcount = len;
4004 mval->br_state = XFS_EXT_NORM;
4009 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4010 error = xfs_iread_extents(NULL, ip, whichfork);
4015 if (!xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got))
4020 while (bno < end && n < *nmap) {
4021 /* Reading past eof, act as though there's a hole up to end. */
4023 got.br_startoff = end;
4024 if (got.br_startoff > bno) {
4025 /* Reading in a hole. */
4026 mval->br_startoff = bno;
4027 mval->br_startblock = HOLESTARTBLOCK;
4028 mval->br_blockcount =
4029 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
4030 mval->br_state = XFS_EXT_NORM;
4031 bno += mval->br_blockcount;
4032 len -= mval->br_blockcount;
4038 /* set up the extent map to return. */
4039 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
4040 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4042 /* If we're done, stop now. */
4043 if (bno >= end || n >= *nmap)
4046 /* Else go on to the next record. */
4047 if (!xfs_iext_get_extent(ifp, ++idx, &got))
4055 * Add a delayed allocation extent to an inode. Blocks are reserved from the
4056 * global pool and the extent inserted into the inode in-core extent tree.
4058 * On entry, got refers to the first extent beyond the offset of the extent to
4059 * allocate or eof is specified if no such extent exists. On return, got refers
4060 * to the extent record that was inserted to the inode fork.
4062 * Note that the allocated extent may have been merged with contiguous extents
4063 * during insertion into the inode fork. Thus, got does not reflect the current
4064 * state of the inode fork on return. If necessary, the caller can use lastx to
4065 * look up the updated record in the inode fork.
4068 xfs_bmapi_reserve_delalloc(
4069 struct xfs_inode *ip,
4073 xfs_filblks_t prealloc,
4074 struct xfs_bmbt_irec *got,
4075 xfs_extnum_t *lastx,
4078 struct xfs_mount *mp = ip->i_mount;
4079 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4081 xfs_extlen_t indlen;
4082 char rt = XFS_IS_REALTIME_INODE(ip);
4085 xfs_fileoff_t aoff = off;
4088 * Cap the alloc length. Keep track of prealloc so we know whether to
4089 * tag the inode before we return.
4091 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
4093 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
4094 if (prealloc && alen >= len)
4095 prealloc = alen - len;
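/*
* For example (illustrative values): with len = 8, prealloc = 24 and
* (not at EOF) the next extent starting 16 blocks past the requested
* offset, alen is first capped at 32, then clamped to 16 by that
* neighbour, and prealloc is recomputed as 16 - 8 = 8 speculative blocks.
*/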
4097 /* Figure out the extent size, adjust alen */
4098 if (whichfork == XFS_COW_FORK)
4099 extsz = xfs_get_cowextsz_hint(ip);
4101 extsz = xfs_get_extsz_hint(ip);
4103 struct xfs_bmbt_irec prev;
4105 if (!xfs_iext_get_extent(ifp, *lastx - 1, &prev))
4106 prev.br_startoff = NULLFILEOFF;
4108 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, rt, eof,
4109 1, 0, &aoff, &alen);
4114 extsz = alen / mp->m_sb.sb_rextsize;
4117 * Make a transaction-less quota reservation for delayed allocation
4118 * blocks. This number gets adjusted later. We return if we haven't
4119 * allocated blocks already inside this loop.
4121 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
4122 rt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4127 * Split changing sb for alen and indlen since they could be coming
4128 * from different places.
4130 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
4134 error = xfs_mod_frextents(mp, -((int64_t)extsz));
4136 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
4140 goto out_unreserve_quota;
4142 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
4144 goto out_unreserve_blocks;
4147 ip->i_delayed_blks += alen;
4149 got->br_startoff = aoff;
4150 got->br_startblock = nullstartblock(indlen);
4151 got->br_blockcount = alen;
4152 got->br_state = XFS_EXT_NORM;
4154 xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
4157 * Tag the inode if blocks were preallocated. Note that COW fork
4158 * preallocation can occur at the start or end of the extent, even when
4159 * prealloc == 0, so we must also check the aligned offset and length.
4161 if (whichfork == XFS_DATA_FORK && prealloc)
4162 xfs_inode_set_eofblocks_tag(ip);
4163 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
4164 xfs_inode_set_cowblocks_tag(ip);
4168 out_unreserve_blocks:
4170 xfs_mod_frextents(mp, extsz);
4172 xfs_mod_fdblocks(mp, alen, false);
4173 out_unreserve_quota:
4174 if (XFS_IS_QUOTA_ON(mp))
4175 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0, rt ?
4176 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4182 struct xfs_bmalloca *bma)
4184 struct xfs_mount *mp = bma->ip->i_mount;
4185 int whichfork = xfs_bmapi_whichfork(bma->flags);
4186 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4187 int tmp_logflags = 0;
4190 ASSERT(bma->length > 0);
4193 * For the wasdelay case, we could also just allocate the stuff asked
4194 * for in this bmap call but that wouldn't be as good.
4197 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4198 bma->offset = bma->got.br_startoff;
4200 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1),
4204 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4206 bma->length = XFS_FILBLKS_MIN(bma->length,
4207 bma->got.br_startoff - bma->offset);
4211 * Set the data type being allocated. For the data fork, the first data
4212 * in the file is treated differently to all other allocations. For the
4213 * attribute fork, we only need to ensure the allocated range is not on
4214 * the busy list.
4216 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4217 bma->datatype = XFS_ALLOC_NOBUSY;
4218 if (whichfork == XFS_DATA_FORK) {
4219 if (bma->offset == 0)
4220 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4222 bma->datatype |= XFS_ALLOC_USERDATA;
4224 if (bma->flags & XFS_BMAPI_ZERO)
4225 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4228 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4231 * Only want to do the alignment at the eof if it is userdata and
4232 * allocation length is larger than a stripe unit.
4234 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4235 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4236 error = xfs_bmap_isaeof(bma, whichfork);
4241 error = xfs_bmap_alloc(bma);
4246 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4247 if (bma->blkno == NULLFSBLOCK)
4249 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4250 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4251 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4252 bma->cur->bc_private.b.dfops = bma->dfops;
4255 * Bump the number of extents we've allocated
4261 bma->cur->bc_private.b.flags =
4262 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4264 bma->got.br_startoff = bma->offset;
4265 bma->got.br_startblock = bma->blkno;
4266 bma->got.br_blockcount = bma->length;
4267 bma->got.br_state = XFS_EXT_NORM;
4270 * In the data fork, a wasdelay extent has been initialized, so
4271 * shouldn't be flagged as unwritten.
4273 * For the cow fork, however, we convert delalloc reservations
4274 * (extents allocated for speculative preallocation) to
4275 * allocated unwritten extents, and only convert the unwritten
4276 * extents to real extents when we're about to write the data.
4278 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4279 (bma->flags & XFS_BMAPI_PREALLOC) &&
4280 xfs_sb_version_hasextflgbit(&mp->m_sb))
4281 bma->got.br_state = XFS_EXT_UNWRITTEN;
4284 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4286 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4287 whichfork, &bma->idx, &bma->cur, &bma->got,
4288 bma->firstblock, bma->dfops, &bma->logflags);
4290 bma->logflags |= tmp_logflags;
4295 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4296 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4297 * the neighbouring ones.
4299 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4301 ASSERT(bma->got.br_startoff <= bma->offset);
4302 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4303 bma->offset + bma->length);
4304 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4305 bma->got.br_state == XFS_EXT_UNWRITTEN);
4310 xfs_bmapi_convert_unwritten(
4311 struct xfs_bmalloca *bma,
4312 struct xfs_bmbt_irec *mval,
4316 int whichfork = xfs_bmapi_whichfork(flags);
4317 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4318 int tmp_logflags = 0;
4321 /* check if we need to do unwritten->real conversion */
4322 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4323 (flags & XFS_BMAPI_PREALLOC))
4326 /* check if we need to do real->unwritten conversion */
4327 if (mval->br_state == XFS_EXT_NORM &&
4328 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4329 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4333 * Modify (by adding) the state flag, if writing.
4335 ASSERT(mval->br_blockcount <= len);
4336 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4337 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4338 bma->ip, whichfork);
4339 bma->cur->bc_private.b.firstblock = *bma->firstblock;
4340 bma->cur->bc_private.b.dfops = bma->dfops;
4342 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4343 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4346 * Before insertion into the bmbt, zero the range being converted
4349 if (flags & XFS_BMAPI_ZERO) {
4350 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4351 mval->br_blockcount);
4356 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4357 &bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
4360 * Log the inode core unconditionally in the unwritten extent conversion
4361 * path because the conversion might not have done so (e.g., if the
4362 * extent count hasn't changed). We need to make sure the inode is dirty
4363 * in the transaction for the sake of fsync(), even if nothing has
4364 * changed, because fsync() will not force the log for this transaction
4365 * unless it sees the inode pinned.
4367 * Note: If we're only converting cow fork extents, there aren't
4368 * any on-disk updates to make, so we don't need to log anything.
4370 if (whichfork != XFS_COW_FORK)
4371 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4376 * Update our extent pointer, given that
4377 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4378 * of the neighbouring ones.
4380 xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx), &bma->got);
4383 * We may have combined previously unwritten space with written space,
4384 * so generate another request.
4386 if (mval->br_blockcount < len)
4392 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4393 * extent state if necessary. Detailed behaviour is controlled by the flags
4394 * parameter. Only allocates blocks from a single allocation group, to avoid
4397 * The returned value in "firstblock" from the first call in a transaction
4398 * must be remembered and presented to subsequent calls in "firstblock".
4399 * An upper bound for the number of blocks to be allocated is supplied to
4400 * the first call in "total"; if no allocation group has that many free
4401 * blocks then the call will fail (return NULLFSBLOCK in "firstblock").
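 *
 * A minimal, hypothetical caller sketch (real callers such as the iomap and
 * attr code add allocation flags, quota reservation and error handling): to
 * map "count" blocks at file offset "off" within a transaction, the same
 * firstblock and dfops must be threaded through every call:
 *
 *	xfs_fsblock_t		firstblock = NULLFSBLOCK;
 *	struct xfs_defer_ops	dfops;
 *	struct xfs_bmbt_irec	map[XFS_BMAP_MAX_NMAP];
 *	int			nmap = XFS_BMAP_MAX_NMAP;
 *
 *	xfs_defer_init(&dfops, &firstblock);
 *	error = xfs_bmapi_write(tp, ip, off, count, 0, &firstblock,
 *				total, map, &nmap, &dfops);
 *
 * On return, nmap holds the number of mappings actually filled in.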
4405 struct xfs_trans *tp, /* transaction pointer */
4406 struct xfs_inode *ip, /* incore inode */
4407 xfs_fileoff_t bno, /* starting file offs. mapped */
4408 xfs_filblks_t len, /* length to map in file */
4409 int flags, /* XFS_BMAPI_... */
4410 xfs_fsblock_t *firstblock, /* first allocated block
4411 controls a.g. for allocs */
4412 xfs_extlen_t total, /* total blocks needed */
4413 struct xfs_bmbt_irec *mval, /* output: map values */
4414 int *nmap, /* i/o: mval size/count */
4415 struct xfs_defer_ops *dfops) /* i/o: list extents to free */
4417 struct xfs_mount *mp = ip->i_mount;
4418 struct xfs_ifork *ifp;
4419 struct xfs_bmalloca bma = { NULL }; /* args for xfs_bmap_alloc */
4420 xfs_fileoff_t end; /* end of mapped file region */
4421 bool eof = false; /* after the end of extents */
4422 int error; /* error return */
4423 int n; /* current extent index */
4424 xfs_fileoff_t obno; /* old block number (offset) */
4425 int whichfork; /* data or attr fork */
4428 xfs_fileoff_t orig_bno; /* original block number value */
4429 int orig_flags; /* original flags arg value */
4430 xfs_filblks_t orig_len; /* original value of len arg */
4431 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4432 int orig_nmap; /* original value of *nmap */
4440 whichfork = xfs_bmapi_whichfork(flags);
4443 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4444 ASSERT(!(flags & XFS_BMAPI_IGSTATE));
4445 ASSERT(tp != NULL ||
4446 (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
4447 (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
4449 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4450 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4451 ASSERT(!(flags & XFS_BMAPI_REMAP));
4453 /* zeroing is currently only for data extents, not metadata */
4454 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4455 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4457 * we can allocate unwritten extents or pre-zero allocated blocks,
4458 * but it makes no sense to do both at once. This would result in
4459 * zeroing the unwritten extent twice, while it would still remain
4460 * an unwritten extent....
4462 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4463 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4465 if (unlikely(XFS_TEST_ERROR(
4466 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4467 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4468 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4469 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4470 return -EFSCORRUPTED;
4473 if (XFS_FORCED_SHUTDOWN(mp))
4476 ifp = XFS_IFORK_PTR(ip, whichfork);
4478 XFS_STATS_INC(mp, xs_blk_mapw);
4480 if (*firstblock == NULLFSBLOCK) {
4481 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE)
4482 bma.minleft = be16_to_cpu(ifp->if_broot->bb_level) + 1;
4489 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4490 error = xfs_iread_extents(tp, ip, whichfork);
4499 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.idx, &bma.got))
4501 if (!xfs_iext_get_extent(ifp, bma.idx - 1, &bma.prev))
4502 bma.prev.br_startoff = NULLFILEOFF;
4508 bma.firstblock = firstblock;
4510 while (bno < end && n < *nmap) {
4511 bool need_alloc = false, wasdelay = false;
4513 /* in hole or beyond EOF? */
4514 if (eof || bma.got.br_startoff > bno) {
4515 if (flags & XFS_BMAPI_DELALLOC) {
4517 * For the COW fork we can reasonably get a
4518 * request for converting an extent that races
4519 * with other threads already having converted
4520 * part of it, since converting COW to regular
4521 * blocks there is not protected by the
4524 ASSERT(flags & XFS_BMAPI_COWFORK);
4525 if (!(flags & XFS_BMAPI_COWFORK)) {
4530 if (eof || bno >= end)
4535 } else if (isnullstartblock(bma.got.br_startblock)) {
4540 * First, deal with the hole before the allocated space
4541 * that we found, if any.
4543 if (need_alloc || wasdelay) {
4545 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4546 bma.wasdel = wasdelay;
4551 * There's a 32/64 bit type mismatch between the
4552 * allocation length request (which can be 64 bits in
4553 * length) and the bma length request, which is
4554 * xfs_extlen_t and therefore 32 bits. Hence we have to
4555 * check for 32-bit overflows and handle them here.
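 *
 * For example, a request for 0x400000 blocks exceeds MAXEXTLEN (0x1fffff),
 * so this pass is clamped to MAXEXTLEN and the remainder is picked up by
 * later iterations of the surrounding loop.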
4557 if (len > (xfs_filblks_t)MAXEXTLEN)
4558 bma.length = MAXEXTLEN;
4563 ASSERT(bma.length > 0);
4564 error = xfs_bmapi_allocate(&bma);
4567 if (bma.blkno == NULLFSBLOCK)
4571 * If this is a CoW allocation, record the data in
4572 * the refcount btree for orphan recovery.
4574 if (whichfork == XFS_COW_FORK) {
4575 error = xfs_refcount_alloc_cow_extent(mp, dfops,
4576 bma.blkno, bma.length);
4582 /* Deal with the allocated space we found. */
4583 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4586 /* Execute unwritten extent conversion if necessary */
4587 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4588 if (error == -EAGAIN)
4593 /* update the extent map to return */
4594 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4597 * If we're done, stop now. Stop when we've allocated
4598 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4599 * the transaction may get too big.
4601 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4604 /* Else go on to the next record. */
4606 if (!xfs_iext_get_extent(ifp, ++bma.idx, &bma.got))
4612 * Transform from btree to extents, give it cur.
4614 if (xfs_bmap_wants_extents(ip, whichfork)) {
4615 int tmp_logflags = 0;
4618 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur,
4619 &tmp_logflags, whichfork);
4620 bma.logflags |= tmp_logflags;
4625 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4626 XFS_IFORK_NEXTENTS(ip, whichfork) >
4627 XFS_IFORK_MAXEXT(ip, whichfork));
4631 * Log everything. Do this after conversion, there's no point in
4632 * logging the extent records if we've converted to btree format.
4634 if ((bma.logflags & xfs_ilog_fext(whichfork)) &&
4635 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4636 bma.logflags &= ~xfs_ilog_fext(whichfork);
4637 else if ((bma.logflags & xfs_ilog_fbroot(whichfork)) &&
4638 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
4639 bma.logflags &= ~xfs_ilog_fbroot(whichfork);
4641 * Log whatever the flags say, even on error. Otherwise we might miss
4642 * detecting a case where the data was changed, there was an error,
4643 * and it wasn't logged, so we don't shut down when we should.
4646 xfs_trans_log_inode(tp, ip, bma.logflags);
4650 ASSERT(*firstblock == NULLFSBLOCK ||
4651 XFS_FSB_TO_AGNO(mp, *firstblock) <=
4653 bma.cur->bc_private.b.firstblock));
4654 *firstblock = bma.cur->bc_private.b.firstblock;
4656 xfs_btree_del_cursor(bma.cur,
4657 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4660 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
4667 struct xfs_trans *tp,
4668 struct xfs_inode *ip,
4671 xfs_fsblock_t startblock,
4672 struct xfs_defer_ops *dfops)
4674 struct xfs_mount *mp = ip->i_mount;
4675 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
4676 struct xfs_btree_cur *cur = NULL;
4677 xfs_fsblock_t firstblock = NULLFSBLOCK;
4678 struct xfs_bmbt_irec got;
4680 int logflags = 0, error;
4683 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4684 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4686 if (unlikely(XFS_TEST_ERROR(
4687 (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_EXTENTS &&
4688 XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) != XFS_DINODE_FMT_BTREE),
4689 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4690 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
4691 return -EFSCORRUPTED;
4694 if (XFS_FORCED_SHUTDOWN(mp))
4697 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4698 error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
4703 if (xfs_iext_lookup_extent(ip, ifp, bno, &idx, &got)) {
4704 /* make sure we only reflink into a hole. */
4705 ASSERT(got.br_startoff > bno);
4706 ASSERT(got.br_startoff - bno >= len);
4709 ip->i_d.di_nblocks += len;
4710 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4712 if (ifp->if_flags & XFS_IFBROOT) {
4713 cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
4714 cur->bc_private.b.firstblock = firstblock;
4715 cur->bc_private.b.dfops = dfops;
4716 cur->bc_private.b.flags = 0;
4719 got.br_startoff = bno;
4720 got.br_startblock = startblock;
4721 got.br_blockcount = len;
4722 got.br_state = XFS_EXT_NORM;
4724 error = xfs_bmap_add_extent_hole_real(tp, ip, XFS_DATA_FORK, &idx, &cur,
4725 &got, &firstblock, dfops, &logflags);
4729 if (xfs_bmap_wants_extents(ip, XFS_DATA_FORK)) {
4730 int tmp_logflags = 0;
4732 error = xfs_bmap_btree_to_extents(tp, ip, cur,
4733 &tmp_logflags, XFS_DATA_FORK);
4734 logflags |= tmp_logflags;
4738 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4739 logflags &= ~XFS_ILOG_DEXT;
4740 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4741 logflags &= ~XFS_ILOG_DBROOT;
4744 xfs_trans_log_inode(tp, ip, logflags);
4746 xfs_btree_del_cursor(cur,
4747 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
4753 * When a delalloc extent is split (e.g., due to a hole punch), the original
4754 * indlen reservation must be shared across the two new extents that are left
4757 * Given the original reservation and the worst case indlen for the two new
4758 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4759 * reservation fairly across the two new extents. If necessary, steal available
4760 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4761 * ores == 1). The number of stolen blocks is returned. The availability and
4762 * subsequent accounting of stolen blocks is the responsibility of the caller.
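 *
 * Rough worked example (ignoring do_div rounding details): with ores = 5,
 * *indlen1 = *indlen2 = 4 (so nres = 8) and avail = 1, one block is stolen,
 * the effective reservation becomes 6, the 75% factor scales each request
 * down to 3, and the function returns 1 with *indlen1 = *indlen2 = 3.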
4764 static xfs_filblks_t
4765 xfs_bmap_split_indlen(
4766 xfs_filblks_t ores, /* original res. */
4767 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4768 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4769 xfs_filblks_t avail) /* stealable blocks */
4771 xfs_filblks_t len1 = *indlen1;
4772 xfs_filblks_t len2 = *indlen2;
4773 xfs_filblks_t nres = len1 + len2; /* new total res. */
4774 xfs_filblks_t stolen = 0;
4775 xfs_filblks_t resfactor;
4778 * Steal as many blocks as we can to try and satisfy the worst case
4779 * indlen for both new extents.
4781 if (ores < nres && avail)
4782 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
4785 /* nothing else to do if we've satisfied the new reservation */
4790 * We can't meet the total required reservation for the two extents.
4791 * Calculate the percent of the overall shortage between both extents
4792 * and apply this percentage to each of the requested indlen values.
4793 * This distributes the shortage fairly and reduces the chances that one
4794 * of the two extents is left with nothing when extents are repeatedly
4797 resfactor = (ores * 100);
4798 do_div(resfactor, nres);
4803 ASSERT(len1 + len2 <= ores);
4804 ASSERT(len1 < *indlen1 && len2 < *indlen2);
4807 * Hand out the remainder to each extent. If one of the two reservations
4808 * is zero, we want to make sure that one gets a block first. The loop
4809 * below starts with len1, so hand len2 a block right off the bat if it
4812 ores -= (len1 + len2);
4813 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4814 if (ores && !len2 && *indlen2) {
4819 if (len1 < *indlen1) {
4825 if (len2 < *indlen2) {
4838 xfs_bmap_del_extent_delay(
4839 struct xfs_inode *ip,
4842 struct xfs_bmbt_irec *got,
4843 struct xfs_bmbt_irec *del)
4845 struct xfs_mount *mp = ip->i_mount;
4846 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4847 struct xfs_bmbt_irec new;
4848 int64_t da_old, da_new, da_diff = 0;
4849 xfs_fileoff_t del_endoff, got_endoff;
4850 xfs_filblks_t got_indlen, new_indlen, stolen;
4851 int error = 0, state = 0;
4854 XFS_STATS_INC(mp, xs_del_exlist);
4856 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4857 del_endoff = del->br_startoff + del->br_blockcount;
4858 got_endoff = got->br_startoff + got->br_blockcount;
4859 da_old = startblockval(got->br_startblock);
4863 ASSERT(*idx <= xfs_iext_count(ifp));
4864 ASSERT(del->br_blockcount > 0);
4865 ASSERT(got->br_startoff <= del->br_startoff);
4866 ASSERT(got_endoff >= del_endoff);
4869 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4871 do_div(rtexts, mp->m_sb.sb_rextsize);
4872 xfs_mod_frextents(mp, rtexts);
4876 * Update the inode delalloc counter now and wait to update the
4877 * sb counters as we might have to borrow some blocks for the
4878 * indirect block accounting.
4880 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4881 -((long)del->br_blockcount), 0,
4882 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
4885 ip->i_delayed_blks -= del->br_blockcount;
4887 if (whichfork == XFS_COW_FORK)
4888 state |= BMAP_COWFORK;
4890 if (got->br_startoff == del->br_startoff)
4891 state |= BMAP_LEFT_CONTIG;
4892 if (got_endoff == del_endoff)
4893 state |= BMAP_RIGHT_CONTIG;
4895 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
4896 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
4898 * Matches the whole extent. Delete the entry.
4900 xfs_iext_remove(ip, *idx, 1, state);
4903 case BMAP_LEFT_CONTIG:
4905 * Deleting the first part of the extent.
4907 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4908 got->br_startoff = del_endoff;
4909 got->br_blockcount -= del->br_blockcount;
4910 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4911 got->br_blockcount), da_old);
4912 got->br_startblock = nullstartblock((int)da_new);
4913 xfs_iext_update_extent(ifp, *idx, got);
4914 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4916 case BMAP_RIGHT_CONTIG:
4918 * Deleting the last part of the extent.
4920 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4921 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4922 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4923 got->br_blockcount), da_old);
4924 got->br_startblock = nullstartblock((int)da_new);
4925 xfs_iext_update_extent(ifp, *idx, got);
4926 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
4930 * Deleting the middle of the extent.
4932 * Distribute the original indlen reservation across the two new
4933 * extents. Steal blocks from the deleted extent if necessary.
4934 * Stealing blocks simply fudges the fdblocks accounting below.
4935 * Warn if either of the new indlen reservations is zero as this
4936 * can lead to delalloc problems.
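 *
 * For example, deleting blocks [40, 60) out of a delalloc extent covering
 * [0, 100) leaves "got" as [0, 40) and "new" as [60, 100), each carrying a
 * share of the original indirect reservation.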
4938 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
4940 got->br_blockcount = del->br_startoff - got->br_startoff;
4941 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4943 new.br_blockcount = got_endoff - del_endoff;
4944 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4946 WARN_ON_ONCE(!got_indlen || !new_indlen);
4947 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4948 del->br_blockcount);
4950 got->br_startblock = nullstartblock((int)got_indlen);
4951 xfs_iext_update_extent(ifp, *idx, got);
4952 trace_xfs_bmap_post_update(ip, *idx, 0, _THIS_IP_);
4954 new.br_startoff = del_endoff;
4955 new.br_state = got->br_state;
4956 new.br_startblock = nullstartblock((int)new_indlen);
4959 xfs_iext_insert(ip, *idx, 1, &new, state);
4961 da_new = got_indlen + new_indlen - stolen;
4962 del->br_blockcount -= stolen;
4966 ASSERT(da_old >= da_new);
4967 da_diff = da_old - da_new;
4969 da_diff += del->br_blockcount;
4971 xfs_mod_fdblocks(mp, da_diff, false);
4976 xfs_bmap_del_extent_cow(
4977 struct xfs_inode *ip,
4979 struct xfs_bmbt_irec *got,
4980 struct xfs_bmbt_irec *del)
4982 struct xfs_mount *mp = ip->i_mount;
4983 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4984 struct xfs_bmbt_irec new;
4985 xfs_fileoff_t del_endoff, got_endoff;
4986 int state = BMAP_COWFORK;
4988 XFS_STATS_INC(mp, xs_del_exlist);
4990 del_endoff = del->br_startoff + del->br_blockcount;
4991 got_endoff = got->br_startoff + got->br_blockcount;
4994 ASSERT(*idx <= xfs_iext_count(ifp));
4995 ASSERT(del->br_blockcount > 0);
4996 ASSERT(got->br_startoff <= del->br_startoff);
4997 ASSERT(got_endoff >= del_endoff);
4998 ASSERT(!isnullstartblock(got->br_startblock));
5000 if (got->br_startoff == del->br_startoff)
5001 state |= BMAP_LEFT_CONTIG;
5002 if (got_endoff == del_endoff)
5003 state |= BMAP_RIGHT_CONTIG;
5005 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
5006 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
5008 * Matches the whole extent. Delete the entry.
5010 xfs_iext_remove(ip, *idx, 1, state);
5013 case BMAP_LEFT_CONTIG:
5015 * Deleting the first part of the extent.
5017 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5018 got->br_startoff = del_endoff;
5019 got->br_blockcount -= del->br_blockcount;
5020 got->br_startblock = del->br_startblock + del->br_blockcount;
5021 xfs_iext_update_extent(ifp, *idx, got);
5022 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5024 case BMAP_RIGHT_CONTIG:
5026 * Deleting the last part of the extent.
5028 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5029 got->br_blockcount -= del->br_blockcount;
5030 xfs_iext_update_extent(ifp, *idx, got);
5031 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5035 * Deleting the middle of the extent.
5037 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5038 got->br_blockcount = del->br_startoff - got->br_startoff;
5039 xfs_iext_update_extent(ifp, *idx, got);
5040 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5042 new.br_startoff = del_endoff;
5043 new.br_blockcount = got_endoff - del_endoff;
5044 new.br_state = got->br_state;
5045 new.br_startblock = del->br_startblock + del->br_blockcount;
5048 xfs_iext_insert(ip, *idx, 1, &new, state);
5054 * Called by xfs_bmapi to update file extent records and the btree
5055 * after removing space (or undoing a delayed allocation).
5057 STATIC int /* error */
5058 xfs_bmap_del_extent(
5059 xfs_inode_t *ip, /* incore inode pointer */
5060 xfs_trans_t *tp, /* current transaction pointer */
5061 xfs_extnum_t *idx, /* extent number to update/delete */
5062 struct xfs_defer_ops *dfops, /* list of extents to be freed */
5063 xfs_btree_cur_t *cur, /* if null, not a btree */
5064 xfs_bmbt_irec_t *del, /* data to remove from extents */
5065 int *logflagsp, /* inode logging flags */
5066 int whichfork, /* data or attr fork */
5067 int bflags) /* bmapi flags */
5069 xfs_filblks_t da_new; /* new delay-alloc indirect blocks */
5070 xfs_filblks_t da_old; /* old delay-alloc indirect blocks */
5071 xfs_fsblock_t del_endblock=0; /* first block past del */
5072 xfs_fileoff_t del_endoff; /* first offset past del */
5073 int delay; /* current block is delayed allocated */
5074 int do_fx; /* free extent at end of routine */
5075 xfs_bmbt_rec_host_t *ep; /* current extent entry pointer */
5076 int error; /* error return value */
5077 int flags; /* inode logging flags */
5078 xfs_bmbt_irec_t got; /* current extent entry */
5079 xfs_fileoff_t got_endoff; /* first offset past got */
5080 int i; /* temp state */
5081 xfs_ifork_t *ifp; /* inode fork pointer */
5082 xfs_mount_t *mp; /* mount structure */
5083 xfs_filblks_t nblks; /* quota/sb block count */
5084 xfs_bmbt_irec_t new; /* new record to be inserted */
5086 uint qfield; /* quota field to update */
5087 xfs_filblks_t temp; /* for indirect length calculations */
5088 xfs_filblks_t temp2; /* for indirect length calculations */
5092 XFS_STATS_INC(mp, xs_del_exlist);
5094 if (whichfork == XFS_ATTR_FORK)
5095 state |= BMAP_ATTRFORK;
5096 else if (whichfork == XFS_COW_FORK)
5097 state |= BMAP_COWFORK;
5099 ifp = XFS_IFORK_PTR(ip, whichfork);
5100 ASSERT((*idx >= 0) && (*idx < xfs_iext_count(ifp)));
5101 ASSERT(del->br_blockcount > 0);
5102 ep = xfs_iext_get_ext(ifp, *idx);
5103 xfs_bmbt_get_all(ep, &got);
5104 ASSERT(got.br_startoff <= del->br_startoff);
5105 del_endoff = del->br_startoff + del->br_blockcount;
5106 got_endoff = got.br_startoff + got.br_blockcount;
5107 ASSERT(got_endoff >= del_endoff);
5108 delay = isnullstartblock(got.br_startblock);
5109 ASSERT(isnullstartblock(del->br_startblock) == delay);
5114 * If deleting a real allocation, must free up the disk space.
5117 flags = XFS_ILOG_CORE;
5119 * Realtime allocation. Free it and record di_nblocks update.
5121 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
5125 ASSERT(do_mod(del->br_blockcount,
5126 mp->m_sb.sb_rextsize) == 0);
5127 ASSERT(do_mod(del->br_startblock,
5128 mp->m_sb.sb_rextsize) == 0);
5129 bno = del->br_startblock;
5130 len = del->br_blockcount;
5131 do_div(bno, mp->m_sb.sb_rextsize);
5132 do_div(len, mp->m_sb.sb_rextsize);
5133 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
5137 nblks = len * mp->m_sb.sb_rextsize;
5138 qfield = XFS_TRANS_DQ_RTBCOUNT;
5141 * Ordinary allocation.
5145 nblks = del->br_blockcount;
5146 qfield = XFS_TRANS_DQ_BCOUNT;
5149 * Set up del_endblock and cur for later.
5151 del_endblock = del->br_startblock + del->br_blockcount;
5153 if ((error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
5154 got.br_startblock, got.br_blockcount,
5157 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5159 da_old = da_new = 0;
5161 da_old = startblockval(got.br_startblock);
5168 * Set flag value to use in switch statement.
5169 * Left-contig is 2, right-contig is 1.
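 * In the switch below that gives: 3 = delete the whole extent, 2 = trim
 * the front, 1 = trim the tail, 0 = carve out the middle.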
5171 switch (((got.br_startoff == del->br_startoff) << 1) |
5172 (got_endoff == del_endoff)) {
5175 * Matches the whole extent. Delete the entry.
5177 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5178 xfs_iext_remove(ip, *idx, 1,
5179 whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0);
5184 XFS_IFORK_NEXT_SET(ip, whichfork,
5185 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5186 flags |= XFS_ILOG_CORE;
5188 flags |= xfs_ilog_fext(whichfork);
5191 if ((error = xfs_btree_delete(cur, &i)))
5193 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5198 * Deleting the first part of the extent.
5200 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5201 xfs_bmbt_set_startoff(ep, del_endoff);
5202 temp = got.br_blockcount - del->br_blockcount;
5203 xfs_bmbt_set_blockcount(ep, temp);
5205 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5207 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5208 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5212 xfs_bmbt_set_startblock(ep, del_endblock);
5213 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5215 flags |= xfs_ilog_fext(whichfork);
5218 if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
5219 got.br_blockcount - del->br_blockcount,
5226 * Deleting the last part of the extent.
5228 temp = got.br_blockcount - del->br_blockcount;
5229 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5230 xfs_bmbt_set_blockcount(ep, temp);
5232 temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
5234 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5235 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5239 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5241 flags |= xfs_ilog_fext(whichfork);
5244 if ((error = xfs_bmbt_update(cur, got.br_startoff,
5246 got.br_blockcount - del->br_blockcount,
5253 * Deleting the middle of the extent.
5255 temp = del->br_startoff - got.br_startoff;
5256 trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
5257 xfs_bmbt_set_blockcount(ep, temp);
5258 new.br_startoff = del_endoff;
5259 temp2 = got_endoff - del_endoff;
5260 new.br_blockcount = temp2;
5261 new.br_state = got.br_state;
5263 new.br_startblock = del_endblock;
5264 flags |= XFS_ILOG_CORE;
5266 if ((error = xfs_bmbt_update(cur,
5268 got.br_startblock, temp,
5271 if ((error = xfs_btree_increment(cur, 0, &i)))
5273 cur->bc_rec.b = new;
5274 error = xfs_btree_insert(cur, &i);
5275 if (error && error != -ENOSPC)
5278 * If we get no-space back from the btree insert,
5279 * it tried a split, and we have a zero
5280 * block reservation.
5281 * Fix up our state and return the error.
5283 if (error == -ENOSPC) {
5285 * Reset the cursor, don't trust
5286 * it after any insert operation.
5288 if ((error = xfs_bmbt_lookup_eq(cur,
5293 XFS_WANT_CORRUPTED_GOTO(mp,
5296 * Update the btree record back
5297 * to the original value.
5299 if ((error = xfs_bmbt_update(cur,
5306 * Reset the extent record back
5307 * to the original value.
5309 xfs_bmbt_set_blockcount(ep,
5315 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
5317 flags |= xfs_ilog_fext(whichfork);
5318 XFS_IFORK_NEXT_SET(ip, whichfork,
5319 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5321 xfs_filblks_t stolen;
5322 ASSERT(whichfork == XFS_DATA_FORK);
5325 * Distribute the original indlen reservation across the
5326 * two new extents. Steal blocks from the deleted extent
5327 * if necessary. Stealing blocks simply fudges the
5328 * fdblocks accounting in xfs_bunmapi().
5330 temp = xfs_bmap_worst_indlen(ip, got.br_blockcount);
5331 temp2 = xfs_bmap_worst_indlen(ip, new.br_blockcount);
5332 stolen = xfs_bmap_split_indlen(da_old, &temp, &temp2,
5333 del->br_blockcount);
5334 da_new = temp + temp2 - stolen;
5335 del->br_blockcount -= stolen;
5338 * Set the reservation for each extent. Warn if either
5339 * is zero as this can lead to delalloc problems.
5341 WARN_ON_ONCE(!temp || !temp2);
5342 xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
5343 new.br_startblock = nullstartblock((int)temp2);
5345 trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
5346 xfs_iext_insert(ip, *idx + 1, 1, &new, state);
5351 /* remove reverse mapping */
5353 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
5359 * If we need to, add to list of extents to delete.
5361 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5362 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5363 error = xfs_refcount_decrease_extent(mp, dfops, del);
5367 xfs_bmap_add_free(mp, dfops, del->br_startblock,
5368 del->br_blockcount, NULL);
5372 * Adjust inode # blocks in the file.
5375 ip->i_d.di_nblocks -= nblks;
5377 * Adjust quota data.
5379 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5380 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);
5383 * Account for change in delayed indirect blocks.
5384 * Nothing to do for disk quota accounting here.
5386 ASSERT(da_old >= da_new);
5387 if (da_old > da_new)
5388 xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new), false);
5395 * Unmap (remove) blocks from a file.
5396 * If nexts is nonzero then the number of extents to remove is limited to
5397 * that value. If not all extents in the block range can be removed then
5402 xfs_trans_t *tp, /* transaction pointer */
5403 struct xfs_inode *ip, /* incore inode */
5404 xfs_fileoff_t bno, /* starting offset to unmap */
5405 xfs_filblks_t *rlen, /* i/o: amount remaining */
5406 int flags, /* misc flags */
5407 xfs_extnum_t nexts, /* number of extents max */
5408 xfs_fsblock_t *firstblock, /* first allocated block
5409 controls a.g. for allocs */
5410 struct xfs_defer_ops *dfops) /* i/o: deferred updates */
5412 xfs_btree_cur_t *cur; /* bmap btree cursor */
5413 xfs_bmbt_irec_t del; /* extent being deleted */
5414 int error; /* error return value */
5415 xfs_extnum_t extno; /* extent number in list */
5416 xfs_bmbt_irec_t got; /* current extent record */
5417 xfs_ifork_t *ifp; /* inode fork pointer */
5418 int isrt; /* freeing in rt area */
5419 xfs_extnum_t lastx; /* last extent index used */
5420 int logflags; /* transaction logging flags */
5421 xfs_extlen_t mod; /* rt extent offset */
5422 xfs_mount_t *mp; /* mount structure */
5423 xfs_fileoff_t start; /* first file offset deleted */
5424 int tmp_logflags; /* partial logging flags */
5425 int wasdel; /* was a delayed alloc extent */
5426 int whichfork; /* data or attribute fork */
5428 xfs_filblks_t len = *rlen; /* length to unmap in file */
5429 xfs_fileoff_t max_len;
5430 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5432 trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_);
5434 whichfork = xfs_bmapi_whichfork(flags);
5435 ASSERT(whichfork != XFS_COW_FORK);
5436 ifp = XFS_IFORK_PTR(ip, whichfork);
5438 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5439 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5440 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5442 return -EFSCORRUPTED;
5445 if (XFS_FORCED_SHUTDOWN(mp))
5448 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5453 * Guesstimate how many blocks we can unmap without running the risk of
5454 * blowing out the transaction with a mix of EFIs and reflink
5457 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5458 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
5462 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5463 (error = xfs_iread_extents(tp, ip, whichfork)))
5465 if (xfs_iext_count(ifp) == 0) {
5469 XFS_STATS_INC(mp, xs_blk_unmap);
5470 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5472 bno = start + len - 1;
5475 * Check to see if the given block number is past the end of the
5476 * file, back up to the last block if so...
5478 if (!xfs_iext_lookup_extent(ip, ifp, bno, &lastx, &got)) {
5480 xfs_iext_get_extent(ifp, --lastx, &got);
5481 bno = got.br_startoff + got.br_blockcount - 1;
5485 if (ifp->if_flags & XFS_IFBROOT) {
5486 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5487 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5488 cur->bc_private.b.firstblock = *firstblock;
5489 cur->bc_private.b.dfops = dfops;
5490 cur->bc_private.b.flags = 0;
5496 * Synchronize by locking the bitmap inode.
5498 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5499 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5500 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5501 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
5505 while (bno != (xfs_fileoff_t)-1 && bno >= start && lastx >= 0 &&
5506 (nexts == 0 || extno < nexts) && max_len > 0) {
5508 * Is the found extent after a hole in which bno lives?
5509 * Just back up to the previous extent, if so.
5511 if (got.br_startoff > bno) {
5514 xfs_iext_get_extent(ifp, lastx, &got);
5517 * Is the last block of this extent before the range
5518 * we're supposed to delete? If so, we're done.
5520 bno = XFS_FILEOFF_MIN(bno,
5521 got.br_startoff + got.br_blockcount - 1);
5525 * Then deal with the (possibly delayed) allocated space
5529 wasdel = isnullstartblock(del.br_startblock);
5532 * Make sure we don't touch multiple AGF headers out of order
5533 * in a single transaction, as that could cause AB-BA deadlocks.
5536 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5537 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
5541 if (got.br_startoff < start) {
5542 del.br_startoff = start;
5543 del.br_blockcount -= start - got.br_startoff;
5545 del.br_startblock += start - got.br_startoff;
5547 if (del.br_startoff + del.br_blockcount > bno + 1)
5548 del.br_blockcount = bno + 1 - del.br_startoff;
5550 /* How much can we safely unmap? */
5551 if (max_len < del.br_blockcount) {
5552 del.br_startoff += del.br_blockcount - max_len;
5554 del.br_startblock += del.br_blockcount - max_len;
5555 del.br_blockcount = max_len;
5558 sum = del.br_startblock + del.br_blockcount;
5560 (mod = do_mod(sum, mp->m_sb.sb_rextsize))) {
5562 * Realtime extent not lined up at the end.
5563 * The extent could have been split into written
5564 * and unwritten pieces, or we could just be
5565 * unmapping part of it. But we can't really
5566 * get rid of part of a realtime extent.
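 *
 * For example, with a 4-block realtime extent size, a delete range whose
 * end falls 2 blocks into an rt extent either skips those trailing blocks
 * (if the piece is unwritten or unwritten extents are unsupported) or
 * converts the written tail to unwritten below instead of freeing it.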
5568 if (del.br_state == XFS_EXT_UNWRITTEN ||
5569 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5571 * This piece is unwritten, or we're not
5572 * using unwritten extents. Skip over it.
5575 bno -= mod > del.br_blockcount ?
5576 del.br_blockcount : mod;
5577 if (bno < got.br_startoff) {
5579 xfs_bmbt_get_all(xfs_iext_get_ext(
5585 * It's written, turn it unwritten.
5586 * This is better than zeroing it.
5588 ASSERT(del.br_state == XFS_EXT_NORM);
5589 ASSERT(tp->t_blk_res > 0);
5591 * If this spans a realtime extent boundary,
5592 * chop it back to the start of the one we end at.
5594 if (del.br_blockcount > mod) {
5595 del.br_startoff += del.br_blockcount - mod;
5596 del.br_startblock += del.br_blockcount - mod;
5597 del.br_blockcount = mod;
5599 del.br_state = XFS_EXT_UNWRITTEN;
5600 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5601 whichfork, &lastx, &cur, &del,
5602 firstblock, dfops, &logflags);
5607 if (isrt && (mod = do_mod(del.br_startblock, mp->m_sb.sb_rextsize))) {
5609 * Realtime extent is lined up at the end but not
5610 * at the front. We'll get rid of full extents if
5613 mod = mp->m_sb.sb_rextsize - mod;
5614 if (del.br_blockcount > mod) {
5615 del.br_blockcount -= mod;
5616 del.br_startoff += mod;
5617 del.br_startblock += mod;
5618 } else if ((del.br_startoff == start &&
5619 (del.br_state == XFS_EXT_UNWRITTEN ||
5620 tp->t_blk_res == 0)) ||
5621 !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
5623 * Can't make it unwritten. There isn't
5624 * a full extent here so just skip it.
5626 ASSERT(bno >= del.br_blockcount);
5627 bno -= del.br_blockcount;
5628 if (got.br_startoff > bno && --lastx >= 0)
5629 xfs_iext_get_extent(ifp, lastx, &got);
5631 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5632 struct xfs_bmbt_irec prev;
5635 * This one is already unwritten.
5636 * It must have a written left neighbor.
5637 * Unwrite the killed part of that one and
5641 xfs_iext_get_extent(ifp, lastx - 1, &prev);
5642 ASSERT(prev.br_state == XFS_EXT_NORM);
5643 ASSERT(!isnullstartblock(prev.br_startblock));
5644 ASSERT(del.br_startblock ==
5645 prev.br_startblock + prev.br_blockcount);
5646 if (prev.br_startoff < start) {
5647 mod = start - prev.br_startoff;
5648 prev.br_blockcount -= mod;
5649 prev.br_startblock += mod;
5650 prev.br_startoff = start;
5652 prev.br_state = XFS_EXT_UNWRITTEN;
5654 error = xfs_bmap_add_extent_unwritten_real(tp,
5655 ip, whichfork, &lastx, &cur,
5656 &prev, firstblock, dfops,
5662 ASSERT(del.br_state == XFS_EXT_NORM);
5663 del.br_state = XFS_EXT_UNWRITTEN;
5664 error = xfs_bmap_add_extent_unwritten_real(tp,
5665 ip, whichfork, &lastx, &cur,
5666 &del, firstblock, dfops,
5675 * If it's the case where the directory code is running
5676 * with no block reservation, and the deleted block is in
5677 * the middle of its extent, and the resulting insert
5678 * of an extent would cause transformation to btree format,
5679 * then reject it. The calling code will then swap
5680 * blocks around instead.
5681 * We have to do this now, rather than waiting for the
5682 * conversion to btree format, since the transaction
5685 if (!wasdel && tp->t_blk_res == 0 &&
5686 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
5687 XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
5688 XFS_IFORK_MAXEXT(ip, whichfork) &&
5689 del.br_startoff > got.br_startoff &&
5690 del.br_startoff + del.br_blockcount <
5691 got.br_startoff + got.br_blockcount) {
5697 * Unreserve quota and update realtime free space, if
5698 * appropriate. If delayed allocation, update the inode delalloc
5699 * counter now and wait to update the sb counters as
5700 * xfs_bmap_del_extent() might need to borrow some blocks.
5703 ASSERT(startblockval(del.br_startblock) > 0);
5705 xfs_filblks_t rtexts;
5707 rtexts = XFS_FSB_TO_B(mp, del.br_blockcount);
5708 do_div(rtexts, mp->m_sb.sb_rextsize);
5709 xfs_mod_frextents(mp, (int64_t)rtexts);
5710 (void)xfs_trans_reserve_quota_nblks(NULL,
5711 ip, -((long)del.br_blockcount), 0,
5712 XFS_QMOPT_RES_RTBLKS);
5714 (void)xfs_trans_reserve_quota_nblks(NULL,
5715 ip, -((long)del.br_blockcount), 0,
5716 XFS_QMOPT_RES_REGBLKS);
5718 ip->i_delayed_blks -= del.br_blockcount;
5720 cur->bc_private.b.flags |=
5721 XFS_BTCUR_BPRV_WASDEL;
5723 cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
5725 error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
5726 &tmp_logflags, whichfork, flags);
5727 logflags |= tmp_logflags;
5731 if (!isrt && wasdel)
5732 xfs_mod_fdblocks(mp, (int64_t)del.br_blockcount, false);
5734 max_len -= del.br_blockcount;
5735 bno = del.br_startoff - 1;
5738 * If not done, go on to the next (previous) record.
5740 if (bno != (xfs_fileoff_t)-1 && bno >= start) {
5742 xfs_iext_get_extent(ifp, lastx, &got);
5743 if (got.br_startoff > bno && --lastx >= 0)
5744 xfs_iext_get_extent(ifp, lastx, &got);
5749 if (bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0)
5752 *rlen = bno - start + 1;
5755 * Convert to a btree if necessary.
5757 if (xfs_bmap_needs_btree(ip, whichfork)) {
5758 ASSERT(cur == NULL);
5759 error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
5760 &cur, 0, &tmp_logflags, whichfork);
5761 logflags |= tmp_logflags;
5766 * transform from btree to extents, give it cur
5768 else if (xfs_bmap_wants_extents(ip, whichfork)) {
5769 ASSERT(cur != NULL);
5770 error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
5772 logflags |= tmp_logflags;
5777 * transform from extents to local?
5782 * Log everything. Do this after conversion, there's no point in
5783 * logging the extent records if we've converted to btree format.
5785 if ((logflags & xfs_ilog_fext(whichfork)) &&
5786 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5787 logflags &= ~xfs_ilog_fext(whichfork);
5788 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5789 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5790 logflags &= ~xfs_ilog_fbroot(whichfork);
5792 * Log the inode even in the error case; if the transaction
5793 * is dirty we'll need to shut down the filesystem.
5796 xfs_trans_log_inode(tp, ip, logflags);
5799 *firstblock = cur->bc_private.b.firstblock;
5800 cur->bc_private.b.allocated = 0;
5802 xfs_btree_del_cursor(cur,
5803 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
5808 /* Unmap a range of a file. */
5812 struct xfs_inode *ip,
5817 xfs_fsblock_t *firstblock,
5818 struct xfs_defer_ops *dfops,
5823 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts, firstblock,
5830 * Determine whether an extent shift can be accomplished by a merge with the
5831 * extent that precedes the target hole of the shift.
5835 struct xfs_bmbt_irec *left, /* preceding extent */
5836 struct xfs_bmbt_irec *got, /* current extent to shift */
5837 xfs_fileoff_t shift) /* shift fsb */
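	/*
	 * For example, left = [offset 0, 10 blocks @ fsb 100] and
	 * got = [offset 15, 5 blocks @ fsb 110] may merge under a shift of 5:
	 * the shifted start offset (10) and start block (110) line up exactly
	 * with the end of "left", provided the states match and the combined
	 * length stays within MAXEXTLEN.
	 */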
5839 xfs_fileoff_t startoff;
5841 startoff = got->br_startoff - shift;
5844 * The extent, once shifted, must be adjacent in-file and on-disk with
5845 * the preceding extent.
5847 if ((left->br_startoff + left->br_blockcount != startoff) ||
5848 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5849 (left->br_state != got->br_state) ||
5850 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
5857 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5858 * hole in the file. If an extent shift would result in the extent being fully
5859 * adjacent to the extent that currently precedes the hole, we can merge with
5860 * the preceding extent rather than do the shift.
5862 * This function assumes the caller has verified a shift-by-merge is possible
5863 * with the provided extents via xfs_bmse_can_merge().
5867 struct xfs_inode *ip,
5869 xfs_fileoff_t shift, /* shift fsb */
5870 int current_ext, /* idx of gotp */
5871 struct xfs_bmbt_irec *got, /* extent to shift */
5872 struct xfs_bmbt_irec *left, /* preceding extent */
5873 struct xfs_btree_cur *cur,
5874 int *logflags, /* output */
5875 struct xfs_defer_ops *dfops)
5877 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5878 struct xfs_bmbt_irec new;
5879 xfs_filblks_t blockcount;
5881 struct xfs_mount *mp = ip->i_mount;
5883 blockcount = left->br_blockcount + got->br_blockcount;
5885 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5886 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5887 ASSERT(xfs_bmse_can_merge(left, got, shift));
5890 new.br_blockcount = blockcount;
5893 * Update the on-disk extent count, the btree if necessary and log the
5896 XFS_IFORK_NEXT_SET(ip, whichfork,
5897 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5898 *logflags |= XFS_ILOG_CORE;
5900 *logflags |= XFS_ILOG_DEXT;
5904 /* lookup and remove the extent to merge */
5905 error = xfs_bmbt_lookup_eq(cur, got->br_startoff, got->br_startblock,
5906 got->br_blockcount, &i);
5909 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5911 error = xfs_btree_delete(cur, &i);
5914 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5916 /* lookup and update size of the previous extent */
5917 error = xfs_bmbt_lookup_eq(cur, left->br_startoff, left->br_startblock,
5918 left->br_blockcount, &i);
5921 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5923 error = xfs_bmbt_update(cur, new.br_startoff, new.br_startblock,
5924 new.br_blockcount, new.br_state);
5929 xfs_iext_update_extent(ifp, current_ext - 1, &new);
5930 xfs_iext_remove(ip, current_ext, 1, 0);
5932 /* update reverse mapping. rmap functions merge the rmaps for us */
5933 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
5936 memcpy(&new, got, sizeof(new));
5937 new.br_startoff = left->br_startoff + left->br_blockcount;
5938 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
5942 * Shift a single extent.
5946 struct xfs_inode *ip,
5948 xfs_fileoff_t offset_shift_fsb,
5950 struct xfs_bmbt_irec *got,
5951 struct xfs_btree_cur *cur,
5953 enum shift_direction direction,
5954 struct xfs_defer_ops *dfops)
5956 struct xfs_ifork *ifp;
5957 struct xfs_mount *mp;
5958 xfs_fileoff_t startoff;
5959 struct xfs_bmbt_irec adj_irec, new;
5965 ifp = XFS_IFORK_PTR(ip, whichfork);
5966 total_extents = xfs_iext_count(ifp);
5968 /* delalloc extents should be prevented by caller */
5969 XFS_WANT_CORRUPTED_RETURN(mp, !isnullstartblock(got->br_startblock));
5971 if (direction == SHIFT_LEFT) {
5972 startoff = got->br_startoff - offset_shift_fsb;
5975 * Check for merge if we've got an extent to the left,
5976 * otherwise make sure there's enough room at the start
5977 * of the file for the shift.
5979 if (!*current_ext) {
5980 if (got->br_startoff < offset_shift_fsb)
5982 goto update_current_ext;
5986 * grab the left extent and check for a large enough hole.
5988 xfs_iext_get_extent(ifp, *current_ext - 1, &adj_irec);
5989 if (startoff < adj_irec.br_startoff + adj_irec.br_blockcount)
5992 /* check whether to merge the extent or shift it down */
5993 if (xfs_bmse_can_merge(&adj_irec, got, offset_shift_fsb)) {
5994 return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
5995 *current_ext, got, &adj_irec,
5996 cur, logflags, dfops);
5999 startoff = got->br_startoff + offset_shift_fsb;
6000 /* nothing to move if this is the last extent */
6001 if (*current_ext >= (total_extents - 1))
6002 goto update_current_ext;
6005 * If this is not the last extent in the file, make sure there
6006 * is enough room between current extent and next extent for
6007 * accommodating the shift.
6009 xfs_iext_get_extent(ifp, *current_ext + 1, &adj_irec);
6010 if (startoff + got->br_blockcount > adj_irec.br_startoff)
6014 * Unlike a left shift (which involves a hole punch),
6015 * a right shift does not modify extent neighbors
6016 * in any way. We should never find mergeable extents
6017 * in this scenario. Check anyway and warn if we
6018 * encounter two extents that could be one.
6020 if (xfs_bmse_can_merge(got, &adj_irec, offset_shift_fsb))
6025 * Increment the extent index for the next iteration, update the start
6026 * offset of the in-core extent and update the btree if applicable.
6029 *logflags |= XFS_ILOG_CORE;
6032 new.br_startoff = startoff;
6035 error = xfs_bmbt_lookup_eq(cur, got->br_startoff,
6036 got->br_startblock, got->br_blockcount, &i);
6039 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
6041 error = xfs_bmbt_update(cur, new.br_startoff,
6042 new.br_startblock, new.br_blockcount,
6047 *logflags |= XFS_ILOG_DEXT;
6050 xfs_iext_update_extent(ifp, *current_ext, &new);
6052 if (direction == SHIFT_LEFT)
6057 /* update reverse mapping */
6058 error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, got);
6061 return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &new);
6065 * Shift extent records to the left/right to cover/create a hole.
6067 * The maximum number of extents to be shifted in a single operation is
6068 * @num_exts. @stop_fsb specifies the file offset at which to stop the shift, and
6069 * the file offset where we've left off is returned in @next_fsb. @offset_shift_fsb
6070 * is the length by which each extent is shifted. If there is no hole to shift
6071 * the extents into, this is considered an invalid operation and we abort
6075 xfs_bmap_shift_extents(
6076 struct xfs_trans *tp,
6077 struct xfs_inode *ip,
6078 xfs_fileoff_t *next_fsb,
6079 xfs_fileoff_t offset_shift_fsb,
6081 xfs_fileoff_t stop_fsb,
6082 xfs_fsblock_t *firstblock,
6083 struct xfs_defer_ops *dfops,
6084 enum shift_direction direction,
6087 struct xfs_btree_cur *cur = NULL;
6088 struct xfs_bmbt_irec got;
6089 struct xfs_mount *mp = ip->i_mount;
6090 struct xfs_ifork *ifp;
6091 xfs_extnum_t nexts = 0;
6092 xfs_extnum_t current_ext;
6093 xfs_extnum_t total_extents;
6094 xfs_extnum_t stop_extent;
6096 int whichfork = XFS_DATA_FORK;
6099 if (unlikely(XFS_TEST_ERROR(
6100 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6101 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6102 mp, XFS_ERRTAG_BMAPIFORMAT))) {
6103 XFS_ERROR_REPORT("xfs_bmap_shift_extents",
6104 XFS_ERRLEVEL_LOW, mp);
6105 return -EFSCORRUPTED;
6108 if (XFS_FORCED_SHUTDOWN(mp))
6111 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
6112 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6113 ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
6115 ifp = XFS_IFORK_PTR(ip, whichfork);
6116 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6117 /* Read in all the extents */
6118 error = xfs_iread_extents(tp, ip, whichfork);
6123 if (ifp->if_flags & XFS_IFBROOT) {
6124 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6125 cur->bc_private.b.firstblock = *firstblock;
6126 cur->bc_private.b.dfops = dfops;
6127 cur->bc_private.b.flags = 0;
6131 * There may be delalloc extents in the data fork before the range we
6132 * are collapsing out, so we cannot use the count of real extents here.
6133 * Instead we have to calculate it from the incore fork.
6135 total_extents = xfs_iext_count(ifp);
6136 if (total_extents == 0) {
6142 * In case of first right shift, we need to initialize next_fsb
6144 if (*next_fsb == NULLFSBLOCK) {
6145 ASSERT(direction == SHIFT_RIGHT);
6147 current_ext = total_extents - 1;
6148 xfs_iext_get_extent(ifp, current_ext, &got);
6149 if (stop_fsb > got.br_startoff) {
6153 *next_fsb = got.br_startoff;
6156 * Look up the extent index for the fsb where we start shifting. We can
6157 * henceforth iterate with current_ext as extent list changes are locked
6160 * If next_fsb lies in a hole beyond which there are no extents we are
6163 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &current_ext,
6170 /* Lookup the extent index at which we have to stop */
6171 if (direction == SHIFT_RIGHT) {
6172 struct xfs_bmbt_irec s;
6174 xfs_iext_lookup_extent(ip, ifp, stop_fsb, &stop_extent, &s);
6175 /* Make stop_extent exclusive of shift range */
6177 if (current_ext <= stop_extent) {
6182 stop_extent = total_extents;
6183 if (current_ext >= stop_extent) {
6189 while (nexts++ < num_exts) {
6190 error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
6191 &current_ext, &got, cur, &logflags,
6196 * If there was an extent merge during the shift, the extent
6197 * count can change. Update the total and grab the next record.
6199 if (direction == SHIFT_LEFT) {
6200 total_extents = xfs_iext_count(ifp);
6201 stop_extent = total_extents;
6204 if (current_ext == stop_extent) {
6206 *next_fsb = NULLFSBLOCK;
6209 xfs_iext_get_extent(ifp, current_ext, &got);
6213 *next_fsb = got.br_startoff;
6217 xfs_btree_del_cursor(cur,
6218 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6221 xfs_trans_log_inode(tp, ip, logflags);
6227 * Splits an extent into two extents at split_fsb, such that split_fsb
6228 * becomes the first block of the new (second) extent. @current_ext is the
6229 * target extent to be split. @split_fsb is the block where the extent is
6230 * split. If split_fsb lies in a hole or at an extent's first block, return 0.
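 *
 * For example, splitting at split_fsb = 120 an extent mapping file offsets
 * [100, 150) trims the original extent to [100, 120) and inserts a new
 * extent for [120, 150) starting at the corresponding disk block.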
6233 xfs_bmap_split_extent_at(
6234 struct xfs_trans *tp,
6235 struct xfs_inode *ip,
6236 xfs_fileoff_t split_fsb,
6237 xfs_fsblock_t *firstfsb,
6238 struct xfs_defer_ops *dfops)
6240 int whichfork = XFS_DATA_FORK;
6241 struct xfs_btree_cur *cur = NULL;
6242 struct xfs_bmbt_irec got;
6243 struct xfs_bmbt_irec new; /* split extent */
6244 struct xfs_mount *mp = ip->i_mount;
6245 struct xfs_ifork *ifp;
6246 xfs_fsblock_t gotblkcnt; /* new block count for got */
6247 xfs_extnum_t current_ext;
6252 if (unlikely(XFS_TEST_ERROR(
6253 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
6254 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
6255 mp, XFS_ERRTAG_BMAPIFORMAT))) {
6256 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
6257 XFS_ERRLEVEL_LOW, mp);
6258 return -EFSCORRUPTED;
6261 if (XFS_FORCED_SHUTDOWN(mp))
6264 ifp = XFS_IFORK_PTR(ip, whichfork);
6265 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
6266 /* Read in all the extents */
6267 error = xfs_iread_extents(tp, ip, whichfork);
6273 * If there are no extents, or split_fsb lies in a hole, we are done.
6275 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &current_ext, &got) ||
6276 got.br_startoff >= split_fsb)
6279 gotblkcnt = split_fsb - got.br_startoff;
6280 new.br_startoff = split_fsb;
6281 new.br_startblock = got.br_startblock + gotblkcnt;
6282 new.br_blockcount = got.br_blockcount - gotblkcnt;
6283 new.br_state = got.br_state;
6285 if (ifp->if_flags & XFS_IFBROOT) {
6286 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
6287 cur->bc_private.b.firstblock = *firstfsb;
6288 cur->bc_private.b.dfops = dfops;
6289 cur->bc_private.b.flags = 0;
6290 error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
6296 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6299 got.br_blockcount = gotblkcnt;
6300 xfs_iext_update_extent(ifp, current_ext, &got);
6302 logflags = XFS_ILOG_CORE;
6304 error = xfs_bmbt_update(cur, got.br_startoff,
6311 logflags |= XFS_ILOG_DEXT;
6313 /* Add new extent */
6315 xfs_iext_insert(ip, current_ext, 1, &new, 0);
6316 XFS_IFORK_NEXT_SET(ip, whichfork,
6317 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
6320 error = xfs_bmbt_lookup_eq(cur, new.br_startoff,
6321 new.br_startblock, new.br_blockcount,
6325 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
6326 cur->bc_rec.b.br_state = new.br_state;
6328 error = xfs_btree_insert(cur, &i);
6331 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
6335 * Convert to a btree if necessary.
6337 if (xfs_bmap_needs_btree(ip, whichfork)) {
6338 int tmp_logflags; /* partial log flag return val */
6340 ASSERT(cur == NULL);
6341 error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
6342 &cur, 0, &tmp_logflags, whichfork);
6343 logflags |= tmp_logflags;
6348 cur->bc_private.b.allocated = 0;
6349 xfs_btree_del_cursor(cur,
6350 error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
6354 xfs_trans_log_inode(tp, ip, logflags);
6359 xfs_bmap_split_extent(
6360 struct xfs_inode *ip,
6361 xfs_fileoff_t split_fsb)
6363 struct xfs_mount *mp = ip->i_mount;
6364 struct xfs_trans *tp;
6365 struct xfs_defer_ops dfops;
6366 xfs_fsblock_t firstfsb;
6369 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6370 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
6374 xfs_ilock(ip, XFS_ILOCK_EXCL);
6375 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6377 xfs_defer_init(&dfops, &firstfsb);
6379 error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
6384 error = xfs_defer_finish(&tp, &dfops);
6388 return xfs_trans_commit(tp);
6391 xfs_defer_cancel(&dfops);
6392 xfs_trans_cancel(tp);
6396 /* Deferred mapping is only for real extents in the data fork. */
6398 xfs_bmap_is_update_needed(
6399 struct xfs_bmbt_irec *bmap)
6401 return bmap->br_startblock != HOLESTARTBLOCK &&
6402 bmap->br_startblock != DELAYSTARTBLOCK;
6405 /* Record a bmap intent. */
6408 struct xfs_mount *mp,
6409 struct xfs_defer_ops *dfops,
6410 enum xfs_bmap_intent_type type,
6411 struct xfs_inode *ip,
6413 struct xfs_bmbt_irec *bmap)
6416 struct xfs_bmap_intent *bi;
6418 trace_xfs_bmap_defer(mp,
6419 XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
6421 XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
6422 ip->i_ino, whichfork,
6424 bmap->br_blockcount,
6427 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6428 INIT_LIST_HEAD(&bi->bi_list);
6431 bi->bi_whichfork = whichfork;
6432 bi->bi_bmap = *bmap;
6434 error = xfs_defer_ijoin(dfops, bi->bi_owner);
6440 xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
6444 /* Map an extent into a file. */
6446 xfs_bmap_map_extent(
6447 struct xfs_mount *mp,
6448 struct xfs_defer_ops *dfops,
6449 struct xfs_inode *ip,
6450 struct xfs_bmbt_irec *PREV)
6452 if (!xfs_bmap_is_update_needed(PREV))
6455 return __xfs_bmap_add(mp, dfops, XFS_BMAP_MAP, ip,
6456 XFS_DATA_FORK, PREV);
6459 /* Unmap an extent out of a file. */
6461 xfs_bmap_unmap_extent(
6462 struct xfs_mount *mp,
6463 struct xfs_defer_ops *dfops,
6464 struct xfs_inode *ip,
6465 struct xfs_bmbt_irec *PREV)
6467 if (!xfs_bmap_is_update_needed(PREV))
6470 return __xfs_bmap_add(mp, dfops, XFS_BMAP_UNMAP, ip,
6471 XFS_DATA_FORK, PREV);
6475 * Process one of the deferred bmap operations. We pass back the
6476 * btree cursor to maintain our lock on the bmapbt between calls.
6479 xfs_bmap_finish_one(
6480 struct xfs_trans *tp,
6481 struct xfs_defer_ops *dfops,
6482 struct xfs_inode *ip,
6483 enum xfs_bmap_intent_type type,
6485 xfs_fileoff_t startoff,
6486 xfs_fsblock_t startblock,
6487 xfs_filblks_t *blockcount,
6490 xfs_fsblock_t firstfsb;
6494 * firstfsb is tied to the transaction lifetime and is used to
6495 * ensure correct AG locking order and schedule work item
6496 * continuations. XFS_BUI_MAX_FAST_EXTENTS (== 1) restricts us
6497 * to only making one bmap call per transaction, so it should
6498 * be safe to have it as a local variable here.
6500 firstfsb = NULLFSBLOCK;
6502 trace_xfs_bmap_deferred(tp->t_mountp,
6503 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6504 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6505 ip->i_ino, whichfork, startoff, *blockcount, state);
6507 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6508 return -EFSCORRUPTED;
6510 if (XFS_TEST_ERROR(false, tp->t_mountp,
6511 XFS_ERRTAG_BMAP_FINISH_ONE))
6516 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
6520 case XFS_BMAP_UNMAP:
6521 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6522 XFS_BMAPI_REMAP, 1, &firstfsb, dfops);
6526 error = -EFSCORRUPTED;