// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_buf_item.h"
#include "xfs_trace.h"
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
#include "xfs_rmap.h"
#include "xfs_ag_resv.h"
#include "xfs_refcount.h"
#include "xfs_icache.h"
kmem_zone_t		*xfs_bmap_free_item_zone;

/*
 * Miscellaneous helper functions
 */

/*
 * Compute and fill in the value of the maximum depth of a bmap btree
 * in this filesystem.  Done once, during mount.
 */
void
xfs_bmap_compute_maxlevels(
	xfs_mount_t	*mp,		/* file system mount structure */
	int		whichfork)	/* data or attr fork */
{
	int		level;		/* btree level */
	uint		maxblocks;	/* max blocks at this level */
	uint		maxleafents;	/* max leaf entries possible */
	int		maxrootrecs;	/* max records in root block */
	int		minleafrecs;	/* min records in leaf block */
	int		minnoderecs;	/* min records in node block */
	int		sz;		/* root block size */

	/*
	 * The maximum number of extents in a file, hence the maximum
	 * number of leaf entries, is controlled by the type of di_nextents
	 * (a signed 32-bit number, xfs_extnum_t), or by di_anextents
	 * (a signed 16-bit number, xfs_aextnum_t).
	 *
	 * Note that we can no longer assume that if we are in ATTR1 that
	 * the fork offset of all the inodes will be
	 * (xfs_default_attroffset(ip) >> 3) because we could have mounted
	 * with ATTR2 and then mounted back with ATTR1, keeping the
	 * di_forkoff's fixed but probably at various positions. Therefore,
	 * for both ATTR1 and ATTR2 we have to assume the worst case scenario
	 * of a minimum size available.
	 */
	if (whichfork == XFS_DATA_FORK) {
		maxleafents = MAXEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
	} else {
		maxleafents = MAXAEXTNUM;
		sz = XFS_BMDR_SPACE_CALC(MINABTPTRS);
	}
	maxrootrecs = xfs_bmdr_maxrecs(sz, 0);
	minleafrecs = mp->m_bmap_dmnr[0];
	minnoderecs = mp->m_bmap_dmnr[1];
	maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
	for (level = 1; maxblocks > 1; level++) {
		if (maxblocks <= maxrootrecs)
			maxblocks = 1;
		else
			maxblocks = (maxblocks + minnoderecs - 1) /
					minnoderecs;
	}
	mp->m_bm_maxlevels[whichfork] = level;
}
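
/*
 * Worked example of the loop above (illustrative geometry, not taken
 * from any specific filesystem): with 4k blocks a bmbt block holds on
 * the order of 250 records, so minimally (half) full blocks hold ~125.
 * MAXEXTNUM (2^31 - 1) extents then need ~2^24 minimally full leaves,
 * and dividing by the ~125-record node fanout per level reaches a
 * single root candidate after four more node levels, giving a data
 * fork m_bm_maxlevels of about 5.
 */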

STATIC int				/* error */
xfs_bmbt_lookup_eq(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b = *irec;
	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
}

STATIC int				/* error */
xfs_bmbt_lookup_first(
	struct xfs_btree_cur	*cur,
	int			*stat)	/* success/failure */
{
	cur->bc_rec.b.br_startoff = 0;
	cur->bc_rec.b.br_startblock = 0;
	cur->bc_rec.b.br_blockcount = 0;
	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
}

/*
 * Check if the inode needs to be converted to btree format.
 */
static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
		XFS_IFORK_NEXTENTS(ip, whichfork) >
			XFS_IFORK_MAXEXT(ip, whichfork);
}

/*
 * Check if the inode should be converted to extent format.
 */
static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
{
	return whichfork != XFS_COW_FORK &&
		XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
		XFS_IFORK_NEXTENTS(ip, whichfork) <=
			XFS_IFORK_MAXEXT(ip, whichfork);
}
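
/*
 * Illustration of the thresholds above (a sketch; real numbers depend
 * on inode size and di_forkoff): each extent costs one 16-byte
 * xfs_bmbt_rec_t in the inode literal area, so a data fork with, say,
 * 416 bytes of space has XFS_IFORK_MAXEXT() == 26.  Adding extent 27
 * makes xfs_bmap_needs_btree() true; shrinking back to 26 or fewer
 * makes xfs_bmap_wants_extents() true again.
 */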

/*
 * Update the record referred to by cur to the value given by irec.
 * This either works (return 0) or gets an EFSCORRUPTED error.
 */
STATIC int
xfs_bmbt_update(
	struct xfs_btree_cur	*cur,
	struct xfs_bmbt_irec	*irec)
{
	union xfs_btree_rec	rec;

	xfs_bmbt_disk_set_all(&rec.bmbt, irec);
	return xfs_btree_update(cur, &rec);
}

/*
 * Compute the worst-case number of indirect blocks that will be used
 * for ip's delayed extent of length "len".
 */
STATIC xfs_filblks_t
xfs_bmap_worst_indlen(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_filblks_t	len)		/* delayed extent length */
{
	int		level;		/* btree level number */
	int		maxrecs;	/* maximum record count at this level */
	xfs_mount_t	*mp;		/* mount structure */
	xfs_filblks_t	rval;		/* return value */

	mp = ip->i_mount;
	maxrecs = mp->m_bmap_dmxr[0];
	for (level = 0, rval = 0;
	     level < XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK);
	     level++) {
		len += maxrecs - 1;
		do_div(len, maxrecs);
		rval += len;
		if (len == 1)
			return rval + XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) -
				level - 1;
		if (level == 0)
			maxrecs = mp->m_bmap_dmxr[1];
	}
	return rval;
}
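
/*
 * Worked example (assuming an illustrative leaf fanout of 250 records
 * per block): a 10000-block delalloc extent needs ceil(10000/250) = 40
 * leaves and then ceil(40/250) = 1 node.  Once the per-level count
 * hits 1, every remaining level up to m_bm_maxlevels costs exactly one
 * more block, so the worst-case reservation is 40 + 1 plus one block
 * per as-yet-unvisited upper level.
 */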

/*
 * Calculate the default attribute fork offset for newly created inodes.
 */
uint
xfs_default_attroffset(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			offset;

	if (mp->m_sb.sb_inodesize == 256) {
		offset = XFS_LITINO(mp, ip->i_d.di_version) -
				XFS_BMDR_SPACE_CALC(MINABTPTRS);
	} else {
		offset = XFS_BMDR_SPACE_CALC(6 * MINABTPTRS);
	}

	ASSERT(offset < XFS_LITINO(mp, ip->i_d.di_version));
	return offset;
}
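
/*
 * Note that this returns a byte offset while the on-disk di_forkoff
 * field counts 8-byte units, which is why callers shift the result
 * right by 3 before storing it (see xfs_bmap_forkoff_reset() and
 * xfs_bmap_set_attrforkoff() below).
 */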

/*
 * Helper routine to reset inode di_forkoff field when switching
 * attribute fork from local to extent format - we reset it where
 * possible to make space available for inline data fork extents.
 */
STATIC void
xfs_bmap_forkoff_reset(
	xfs_inode_t	*ip,
	int		whichfork)
{
	if (whichfork == XFS_ATTR_FORK &&
	    ip->i_d.di_format != XFS_DINODE_FMT_DEV &&
	    ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
		uint	dfl_forkoff = xfs_default_attroffset(ip) >> 3;

		if (dfl_forkoff > ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = dfl_forkoff;
	}
}

#ifdef DEBUG
STATIC struct xfs_buf *
xfs_bmap_get_bp(
	struct xfs_btree_cur	*cur,
	xfs_fsblock_t		bno)
{
	struct xfs_log_item	*lip;
	int			i;

	if (!cur)
		return NULL;
	for (i = 0; i < XFS_BTREE_MAXLEVELS; i++) {
		if (!cur->bc_bufs[i])
			break;
		if (XFS_BUF_ADDR(cur->bc_bufs[i]) == bno)
			return cur->bc_bufs[i];
	}

	/* Chase down all the log items to see if the bp is there */
	list_for_each_entry(lip, &cur->bc_tp->t_items, li_trans) {
		struct xfs_buf_log_item	*bip = (struct xfs_buf_log_item *)lip;

		if (bip->bli_item.li_type == XFS_LI_BUF &&
		    XFS_BUF_ADDR(bip->bli_buf) == bno)
			return bip->bli_buf;
	}

	return NULL;
}

STATIC void
xfs_check_block(
	struct xfs_btree_block	*block,
	xfs_mount_t		*mp,
	int			root,
	short			sz)
{
	int			i, j, dmxr;
	__be64			*pp, *thispa;	/* pointer to block address */
	xfs_bmbt_key_t		*prevp, *keyp;

	ASSERT(be16_to_cpu(block->bb_level) > 0);

	prevp = NULL;
	for (i = 1; i <= xfs_btree_get_numrecs(block); i++) {
		dmxr = mp->m_bmap_dmxr[0];
		keyp = XFS_BMBT_KEY_ADDR(mp, block, i);

		if (prevp) {
			ASSERT(be64_to_cpu(prevp->br_startoff) <
			       be64_to_cpu(keyp->br_startoff));
		}
		prevp = keyp;

		/*
		 * Compare the block numbers to see if there are dups.
		 */
		if (root)
			pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz);
		else
			pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr);

		for (j = i+1; j <= be16_to_cpu(block->bb_numrecs); j++) {
			if (root)
				thispa = XFS_BMAP_BROOT_PTR_ADDR(mp, block, j, sz);
			else
				thispa = XFS_BMBT_PTR_ADDR(mp, block, j, dmxr);
			if (*thispa == *pp) {
				xfs_warn(mp, "%s: thispa(%d) == pp(%d) %Ld",
					__func__, j, i,
					(unsigned long long)be64_to_cpu(*thispa));
				xfs_err(mp, "%s: ptrs are equal in node\n",
					__func__);
				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
			}
		}
	}
}

/*
 * Check that the extents for the inode ip are in the right order in all
 * btree leaves. This becomes prohibitively expensive for large extent count
 * files, so don't bother with inodes that have more than 10,000 extents in
 * them. The btree record ordering checks will still be done, so for such large
 * bmapbt constructs that is going to catch most corruptions.
 */
STATIC void
xfs_bmap_check_leaf_extents(
	xfs_btree_cur_t		*cur,	/* btree cursor or null */
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*block;	/* current btree block */
	xfs_fsblock_t		bno;	/* block # of "block" */
	xfs_buf_t		*bp;	/* buffer for "block" */
	int			error;	/* error return value */
	xfs_extnum_t		i=0, j;	/* index into the extents list */
	struct xfs_ifork	*ifp;	/* fork structure */
	int			level;	/* btree level, for checking */
	xfs_mount_t		*mp;	/* file system mount structure */
	__be64			*pp;	/* pointer to block address */
	xfs_bmbt_rec_t		*ep;	/* pointer to current extent */
	xfs_bmbt_rec_t		last = {0, 0}; /* last extent in prev block */
	xfs_bmbt_rec_t		*nextp;	/* pointer to next extent */
	int			bp_release = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE) {
		return;
	}

	/* skip large extent count inodes */
	if (ip->i_d.di_nextents > 10000)
		return;

	bno = NULLFSBLOCK;
	mp = ip->i_mount;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	block = ifp->if_broot;
	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	ASSERT(level > 0);
	xfs_check_block(block, mp, 1, ifp->if_broot_bytes);
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		/* See if buf is in cur first */
		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;

		/*
		 * Check this block for basic sanity (increasing keys and
		 * no duplicate blocks).
		 */

		xfs_check_block(block, mp, 0, 0);
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
					xfs_verify_fsbno(mp, bno), error0);
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;

	/*
	 * Loop over all leaf nodes checking that all extents are in the right order.
	 */
	for (;;) {
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);

		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);

		/*
		 * Check all the extents to make sure they are OK.
		 * If we had a previous block, the last entry should
		 * conform with the first entry in this one.
		 */

		ep = XFS_BMBT_REC_ADDR(mp, block, 1);
		if (i) {
			ASSERT(xfs_bmbt_disk_get_startoff(&last) +
			       xfs_bmbt_disk_get_blockcount(&last) <=
			       xfs_bmbt_disk_get_startoff(ep));
		}
		for (j = 1; j < num_recs; j++) {
			nextp = XFS_BMBT_REC_ADDR(mp, block, j + 1);
			ASSERT(xfs_bmbt_disk_get_startoff(ep) +
			       xfs_bmbt_disk_get_blockcount(ep) <=
			       xfs_bmbt_disk_get_startoff(nextp));
			ep = nextp;
		}

		last = *ep;
		i += num_recs;
		if (bp_release) {
			bp_release = 0;
			xfs_trans_brelse(NULL, bp);
		}
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;

		bp_release = 0;
		bp = xfs_bmap_get_bp(cur, XFS_FSB_TO_DADDR(mp, bno));
		if (!bp) {
			bp_release = 1;
			error = xfs_btree_read_bufl(mp, NULL, bno, 0, &bp,
						XFS_BMAP_BTREE_REF,
						&xfs_bmbt_buf_ops);
			if (error)
				goto error_norelse;
		}
		block = XFS_BUF_TO_BLOCK(bp);
	}

	return;

error0:
	xfs_warn(mp, "%s: at error0", __func__);
	if (bp_release)
		xfs_trans_brelse(NULL, bp);
error_norelse:
	xfs_warn(mp, "%s: BAD after btree leaves for %d extents",
		__func__, i);
	xfs_err(mp, "%s: CORRUPTED BTREE OR SOMETHING", __func__);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
	return;
}

/*
 * Validate that the bmbt_irecs being returned from bmapi are valid
 * given the caller's original parameters.  Specifically check the
 * ranges of the returned irecs to ensure that they only extend beyond
 * the given parameters if the XFS_BMAPI_ENTIRE flag was set.
 */
STATIC void
xfs_bmap_validate_ret(
	xfs_fileoff_t		bno,
	xfs_filblks_t		len,
	int			flags,
	xfs_bmbt_irec_t		*mval,
	int			nmap,
	int			ret_nmap)
{
	int			i;	/* index to map values */

	ASSERT(ret_nmap <= nmap);

	for (i = 0; i < ret_nmap; i++) {
		ASSERT(mval[i].br_blockcount > 0);
		if (!(flags & XFS_BMAPI_ENTIRE)) {
			ASSERT(mval[i].br_startoff >= bno);
			ASSERT(mval[i].br_blockcount <= len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount <=
			       bno + len);
		} else {
			ASSERT(mval[i].br_startoff < bno + len);
			ASSERT(mval[i].br_startoff + mval[i].br_blockcount >
			       bno);
		}
		ASSERT(i == 0 ||
		       mval[i - 1].br_startoff + mval[i - 1].br_blockcount ==
		       mval[i].br_startoff);
		ASSERT(mval[i].br_startblock != DELAYSTARTBLOCK &&
		       mval[i].br_startblock != HOLESTARTBLOCK);
		ASSERT(mval[i].br_state == XFS_EXT_NORM ||
		       mval[i].br_state == XFS_EXT_UNWRITTEN);
	}
}

#else
#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)	do { } while (0)
#endif /* DEBUG */

/*
 * bmap free list manipulation functions
 */

/*
 * Add the extent to the list of extents to be freed at transaction end.
 * The list is maintained sorted (by block number).
 */
void
__xfs_bmap_add_free(
	struct xfs_trans		*tp,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	const struct xfs_owner_info	*oinfo,
	bool				skip_discard)
{
	struct xfs_extent_free_item	*new;		/* new element */
#ifdef DEBUG
	struct xfs_mount		*mp = tp->t_mountp;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;

	ASSERT(bno != NULLFSBLOCK);
	ASSERT(len > 0);
	ASSERT(len <= MAXEXTLEN);
	ASSERT(!isnullstartblock(bno));
	agno = XFS_FSB_TO_AGNO(mp, bno);
	agbno = XFS_FSB_TO_AGBNO(mp, bno);
	ASSERT(agno < mp->m_sb.sb_agcount);
	ASSERT(agbno < mp->m_sb.sb_agblocks);
	ASSERT(len < mp->m_sb.sb_agblocks);
	ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
	ASSERT(xfs_bmap_free_item_zone != NULL);

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;
	else
		new->xefi_oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
	new->xefi_skip_discard = skip_discard;
	trace_xfs_bmap_free_defer(tp->t_mountp,
			XFS_FSB_TO_AGNO(tp->t_mountp, bno), 0,
			XFS_FSB_TO_AGBNO(tp->t_mountp, bno), len);
	xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
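
/*
 * Example usage (a sketch mirroring xfs_bmap_btree_to_extents() below),
 * via the xfs_bmap_add_free() wrapper which passes skip_discard = false:
 *
 *	struct xfs_owner_info	oinfo;
 *
 *	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
 *	xfs_bmap_add_free(tp, cbno, 1, &oinfo);
 *
 * Nothing is freed here and now; the extent is released when the
 * deferred free item is processed as the transaction rolls to commit.
 */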

/*
 * Inode fork format manipulation functions
 */

/*
 * Convert the inode format to extent format if it currently is in btree format,
 * but the extent list is small enough that it fits into the extent format.
 *
 * Since the extents are already in-core, all we have to do is give up the space
 * for the btree root and pitch the leaf block.
 */
STATIC int				/* error */
xfs_bmap_btree_to_extents(
	struct xfs_trans	*tp,	/* transaction pointer */
	struct xfs_inode	*ip,	/* incore inode pointer */
	struct xfs_btree_cur	*cur,	/* btree cursor */
	int			*logflagsp, /* inode logging flags */
	int			whichfork)  /* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*rblock = ifp->if_broot;
	struct xfs_btree_block	*cblock;/* child btree block */
	xfs_fsblock_t		cbno;	/* child block number */
	xfs_buf_t		*cbp;	/* child block's buffer */
	int			error;	/* error return value */
	__be64			*pp;	/* ptr to block address */
	struct xfs_owner_info	oinfo;

	/* check if we actually need the extent format first: */
	if (!xfs_bmap_wants_extents(ip, whichfork))
		return 0;

	/* otherwise, convert */
	ASSERT(cur);
	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
	ASSERT(be16_to_cpu(rblock->bb_level) == 1);
	ASSERT(be16_to_cpu(rblock->bb_numrecs) == 1);
	ASSERT(xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0) == 1);

	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes);
	cbno = be64_to_cpu(*pp);
#ifdef DEBUG
	XFS_WANT_CORRUPTED_RETURN(cur->bc_mp,
			xfs_btree_check_lptr(cur, cbno, 1));
#endif
	error = xfs_btree_read_bufl(mp, tp, cbno, 0, &cbp, XFS_BMAP_BTREE_REF,
				&xfs_bmbt_buf_ops);
	if (error)
		return error;
	cblock = XFS_BUF_TO_BLOCK(cbp);
	if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
		return error;
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(cur->bc_tp, cbno, 1, &oinfo);
	ip->i_d.di_nblocks--;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
	xfs_trans_binval(tp, cbp);
	if (cur->bc_bufs[0] == cbp)
		cur->bc_bufs[0] = NULL;
	xfs_iroot_realloc(ip, -1, whichfork);
	ASSERT(ifp->if_broot == NULL);
	ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	*logflagsp |= XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
	return 0;
}

/*
 * Convert an extents-format file into a btree-format file.
 * The new file will have a root block (in the inode) and a single child block.
 */
STATIC int					/* error */
xfs_bmap_extents_to_btree(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	struct xfs_btree_cur	**curp,		/* cursor returned to caller */
	int			wasdel,		/* converting a delayed alloc */
	int			*logflagsp,	/* inode logging flags */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_btree_block	*ablock;	/* allocated (child) bt block */
	struct xfs_buf		*abp;		/* buffer for ablock */
	struct xfs_alloc_arg	args;		/* allocation arguments */
	struct xfs_bmbt_rec	*arp;		/* child record pointer */
	struct xfs_btree_block	*block;		/* btree root block */
	struct xfs_btree_cur	*cur;		/* bmap btree cursor */
	int			error;		/* error return value */
	struct xfs_ifork	*ifp;		/* inode fork pointer */
	struct xfs_bmbt_key	*kp;		/* root block key pointer */
	struct xfs_mount	*mp;		/* mount structure */
	xfs_bmbt_ptr_t		*pp;		/* root block address pointer */
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	rec;
	xfs_extnum_t		cnt = 0;

	mp = ip->i_mount;
	ASSERT(whichfork != XFS_COW_FORK);
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);

	/*
	 * Make space in the inode incore. This needs to be undone if we fail
	 * to expand the root.
	 */
	xfs_iroot_realloc(ip, 1, whichfork);
	ifp->if_flags |= XFS_IFBROOT;

	/*
	 * Fill in the root.
	 */
	block = ifp->if_broot;
	xfs_btree_init_block_int(mp, block, XFS_BUF_DADDR_NULL,
				 XFS_BTNUM_BMAP, 1, 1, ip->i_ino,
				 XFS_BTREE_LONG_PTRS);
	/*
	 * Need a cursor.  Can't allocate until bb_level is filled in.
	 */
	cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
	cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
	/*
	 * Convert to a btree with two levels, one record in root.
	 */
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_BTREE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = mp;
	xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
	} else if (tp->t_flags & XFS_TRANS_LOWMODE) {
		args.type = XFS_ALLOCTYPE_START_BNO;
		args.fsbno = tp->t_firstblock;
	} else {
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
		args.fsbno = tp->t_firstblock;
	}
	args.minlen = args.maxlen = args.prod = 1;
	args.wasdel = wasdel;
	*logflagsp = 0;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_root_realloc;

	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
		error = -ENOSPC;
		goto out_root_realloc;
	}

	/*
	 * Allocation can't fail, the space was reserved.
	 */
	ASSERT(tp->t_firstblock == NULLFSBLOCK ||
	       args.agno >= XFS_FSB_TO_AGNO(mp, tp->t_firstblock));
	tp->t_firstblock = args.fsbno;
	cur->bc_private.b.allocated++;
	ip->i_d.di_nblocks++;
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, 1L);
	abp = xfs_btree_get_bufl(mp, tp, args.fsbno, 0);
	if (!abp) {
		error = -EFSCORRUPTED;
		goto out_unreserve_dquot;
	}

	/*
	 * Fill in the child block.
	 */
	abp->b_ops = &xfs_bmbt_buf_ops;
	ablock = XFS_BUF_TO_BLOCK(abp);
	xfs_btree_init_block_int(mp, ablock, abp->b_bn,
				XFS_BTNUM_BMAP, 0, 0, ip->i_ino,
				XFS_BTREE_LONG_PTRS);

	for_each_xfs_iext(ifp, &icur, &rec) {
		if (isnullstartblock(rec.br_startblock))
			continue;
		arp = XFS_BMBT_REC_ADDR(mp, ablock, 1 + cnt);
		xfs_bmbt_disk_set_all(arp, &rec);
		cnt++;
	}
	ASSERT(cnt == XFS_IFORK_NEXTENTS(ip, whichfork));
	xfs_btree_set_numrecs(ablock, cnt);

	/*
	 * Fill in the root key and pointer.
	 */
	kp = XFS_BMBT_KEY_ADDR(mp, block, 1);
	arp = XFS_BMBT_REC_ADDR(mp, ablock, 1);
	kp->br_startoff = cpu_to_be64(xfs_bmbt_disk_get_startoff(arp));
	pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur,
						be16_to_cpu(block->bb_level)));
	*pp = cpu_to_be64(args.fsbno);

	/*
	 * Do all this logging at the end so that
	 * the root is at the right level.
	 */
	xfs_btree_log_block(cur, abp, XFS_BB_ALL_BITS);
	xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
	ASSERT(*curp == NULL);
	*curp = cur;
	*logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
	return 0;

out_unreserve_dquot:
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
out_root_realloc:
	xfs_iroot_realloc(ip, -1, whichfork);
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
	ASSERT(ifp->if_broot == NULL);
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);

	return error;
}
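
/*
 * The post-conversion layout, for illustration:
 *
 *	inode fork:   bmbt root (level 1), one key/pointer pair
 *	      |
 *	child block:  level 0 leaf holding the "cnt" real extent
 *	              records copied from the incore extent list
 *
 * Delalloc records are skipped during the copy above because they own
 * no disk blocks yet; they continue to live only in the incore tree.
 */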

/*
 * Convert a local file to an extents file.
 * This code is out of bounds for data forks of regular files,
 * since the file data needs to get logged so things will stay consistent.
 * (The bmap-level manipulations are ok, though).
 */
void
xfs_bmap_local_to_extents_empty(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(whichfork != XFS_COW_FORK);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
	ASSERT(ifp->if_bytes == 0);
	ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);

	xfs_bmap_forkoff_reset(ip, whichfork);
	ifp->if_flags &= ~XFS_IFINLINE;
	ifp->if_flags |= XFS_IFEXTENTS;
	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;
	XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
}

STATIC int				/* error */
xfs_bmap_local_to_extents(
	xfs_trans_t	*tp,		/* transaction pointer */
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extlen_t	total,		/* total blocks needed by transaction */
	int		*logflagsp,	/* inode logging flags */
	int		whichfork,
	void		(*init_fn)(struct xfs_trans *tp,
				   struct xfs_buf *bp,
				   struct xfs_inode *ip,
				   struct xfs_ifork *ifp))
{
	int		error = 0;
	int		flags;		/* logging flags returned */
	struct xfs_ifork *ifp;		/* inode fork pointer */
	xfs_alloc_arg_t	args;		/* allocation arguments */
	xfs_buf_t	*bp;		/* buffer for extent block */
	struct xfs_bmbt_irec rec;
	struct xfs_iext_cursor icur;

	/*
	 * We don't want to deal with the case of keeping inode data inline yet.
	 * So sending the data fork of a regular inode is invalid.
	 */
	ASSERT(!(S_ISREG(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK));
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (!ifp->if_bytes) {
		xfs_bmap_local_to_extents_empty(ip, whichfork);
		flags = XFS_ILOG_CORE;
		goto done;
	}

	flags = 0;
	error = 0;
	ASSERT((ifp->if_flags & (XFS_IFINLINE|XFS_IFEXTENTS)) == XFS_IFINLINE);
	memset(&args, 0, sizeof(args));
	args.tp = tp;
	args.mp = ip->i_mount;
	xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
	/*
	 * Allocate a block.  We know we need only one, since the
	 * file currently fits in an inode.
	 */
	if (tp->t_firstblock == NULLFSBLOCK) {
		args.fsbno = XFS_INO_TO_FSB(args.mp, ip->i_ino);
		args.type = XFS_ALLOCTYPE_START_BNO;
	} else {
		args.fsbno = tp->t_firstblock;
		args.type = XFS_ALLOCTYPE_NEAR_BNO;
	}
	args.total = total;
	args.minlen = args.maxlen = args.prod = 1;
	error = xfs_alloc_vextent(&args);
	if (error)
		goto done;

	/* Can't fail, the space was reserved. */
	ASSERT(args.fsbno != NULLFSBLOCK);
	ASSERT(args.len == 1);
	tp->t_firstblock = args.fsbno;
	bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

	/*
	 * Initialize the block, copy the data and log the remote buffer.
	 *
	 * The callout is responsible for logging because the remote format
	 * might differ from the local format and thus we don't know how much to
	 * log here. Note that init_fn must also set the buffer log item type
	 * correctly.
	 */
	init_fn(tp, bp, ip, ifp);

	/* account for the change in fork size */
	xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
	xfs_bmap_local_to_extents_empty(ip, whichfork);
	flags |= XFS_ILOG_CORE;

	ifp->if_u1.if_root = NULL;
	ifp->if_height = 0;

	rec.br_startoff = 0;
	rec.br_startblock = args.fsbno;
	rec.br_blockcount = 1;
	rec.br_state = XFS_EXT_NORM;
	xfs_iext_first(ifp, &icur);
	xfs_iext_insert(ip, &icur, &rec, 0);

	XFS_IFORK_NEXT_SET(ip, whichfork, 1);
	ip->i_d.di_nblocks = 1;
	xfs_trans_mod_dquot_byino(tp, ip,
		XFS_TRANS_DQ_BCOUNT, 1L);
	flags |= xfs_ilog_fext(whichfork);

done:
	*logflagsp = flags;
	return error;
}
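
/*
 * The init_fn callout above lets each local-format flavour supply its
 * own block initialiser.  For instance, the symlink conversion later
 * in this file passes xfs_symlink_local_to_remote, which copies the
 * inline target into the new block, sets the buffer log item type and
 * logs the range it wrote; this function only accounts for the fork
 * size change and inserts the single new extent record.
 */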

/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		error = xfs_bmbt_lookup_first(cur, &stat);
		if (error)
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(mp, stat == 1, error0);
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return -ENOSPC;
		}
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle extents format files.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_extents(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
	int			error;		/* error return value */

	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
		return 0;
	cur = NULL;
	error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0, flags,
					  XFS_DATA_FORK);
	if (cur) {
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, error);
	}
	return error;
}

/*
 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 * different data fork content type needs a different callout to do the
 * conversion. Some are basic and only require special block initialisation
 * callouts for the data formatting, others (directories) are so specialised
 * they handle everything themselves.
 *
 * XXX (dgc): investigate whether directory conversion can use the generic
 * formatting callout. It should be possible - it's just a very complex
 * formatter.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_local(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode pointer */
	int			*flags)		/* inode logging flags */
{
	struct xfs_da_args	dargs;		/* args for dir/attr code */

	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
		return 0;

	if (S_ISDIR(VFS_I(ip)->i_mode)) {
		memset(&dargs, 0, sizeof(dargs));
		dargs.geo = ip->i_mount->m_dir_geo;
		dargs.dp = ip;
		dargs.total = dargs.geo->fsbcount;
		dargs.whichfork = XFS_DATA_FORK;
		dargs.trans = tp;
		return xfs_dir2_sf_to_block(&dargs);
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		return xfs_bmap_local_to_extents(tp, ip, 1, flags,
						 XFS_DATA_FORK,
						 xfs_symlink_local_to_remote);

	/* should only be called for types that support local format data */
	ASSERT(0);
	return -EFSCORRUPTED;
}

/* Set an inode attr fork offset based on the format */
int
xfs_bmap_set_attrforkoff(
	struct xfs_inode	*ip,
	int			size,
	int			*version)
{
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_DEV:
		ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
		if (!ip->i_d.di_forkoff)
			ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
		else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
			*version = 2;
		break;
	default:
		ASSERT(0);
		return -EINVAL;
	}

	return 0;
}

/*
 * Convert inode from non-attributed to attributed.
 * Must not be in a transaction, ip must not be locked.
 */
int						/* error code */
xfs_bmap_add_attrfork(
	xfs_inode_t		*ip,		/* incore inode pointer */
	int			size,		/* space new attribute needs */
	int			rsvd)		/* xact may use reserved blks */
{
	xfs_mount_t		*mp;		/* mount structure */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			blks;		/* space reservation */
	int			version = 1;	/* superblock attr version */
	int			logflags;	/* logging flags */
	int			error;		/* error return value */

	ASSERT(XFS_IFORK_Q(ip) == 0);

	mp = ip->i_mount;
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	blks = XFS_ADDAFORK_SPACE_RES(mp);

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_addafork, blks, 0,
			rsvd ? XFS_TRANS_RESERVE : 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_trans_reserve_quota_nblks(tp, ip, blks, 0, rsvd ?
			XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
			XFS_QMOPT_RES_REGBLKS);
	if (error)
		goto trans_cancel;
	if (XFS_IFORK_Q(ip))
		goto trans_cancel;
	if (ip->i_d.di_anextents != 0) {
		error = -EFSCORRUPTED;
		goto trans_cancel;
	}
	if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS) {
		/*
		 * For inodes coming from pre-6.2 filesystems.
		 */
		ASSERT(ip->i_d.di_aformat == 0);
		ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	}

	xfs_trans_ijoin(tp, ip, 0);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_bmap_set_attrforkoff(ip, size, &version);
	if (error)
		goto trans_cancel;
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
	ip->i_afp->if_flags = XFS_IFEXTENTS;
	logflags = 0;
	switch (ip->i_d.di_format) {
	case XFS_DINODE_FMT_LOCAL:
		error = xfs_bmap_add_attrfork_local(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_bmap_add_attrfork_extents(tp, ip, &logflags);
		break;
	case XFS_DINODE_FMT_BTREE:
		error = xfs_bmap_add_attrfork_btree(tp, ip, &logflags);
		break;
	default:
		error = 0;
		break;
	}
	if (logflags)
		xfs_trans_log_inode(tp, ip, logflags);
	if (error)
		goto trans_cancel;
	if (!xfs_sb_version_hasattr(&mp->m_sb) ||
	   (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2)) {
		bool log_sb = false;

		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasattr(&mp->m_sb)) {
			xfs_sb_version_addattr(&mp->m_sb);
			log_sb = true;
		}
		if (!xfs_sb_version_hasattr2(&mp->m_sb) && version == 2) {
			xfs_sb_version_addattr2(&mp->m_sb);
			log_sb = true;
		}
		spin_unlock(&mp->m_sb_lock);
		if (log_sb)
			xfs_log_sb(tp);
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

trans_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
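
/*
 * A typical caller (sketch, not a quote of any specific call site)
 * adds the attr fork lazily when the first extended attribute is set:
 *
 *	if (!XFS_IFORK_Q(ip)) {
 *		error = xfs_bmap_add_attrfork(ip, size, rsvd);
 *		if (error)
 *			return error;
 *	}
 *
 * where "size" is the space the new attribute needs and "rsvd" allows
 * the transaction to dip into the reserved block pool.
 */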

/*
 * Internal and external extent tree search functions.
 */

/*
 * Read in extents from a btree-format inode.
 */
int
xfs_iread_extents(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	xfs_extnum_t		nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	struct xfs_btree_block	*block = ifp->if_broot;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	new;
	xfs_fsblock_t		bno;
	struct xfs_buf		*bp;
	xfs_extnum_t		i, j;
	int			level;
	__be64			*pp;
	int			error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}

	/*
	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
	 */
	level = be16_to_cpu(block->bb_level);
	if (unlikely(level == 0)) {
		XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
		return -EFSCORRUPTED;
	}
	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
	bno = be64_to_cpu(*pp);

	/*
	 * Go down the tree until leaf level is reached, following the first
	 * pointer (leftmost) at each level.
	 */
	while (level-- > 0) {
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
		if (level == 0)
			break;
		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
		bno = be64_to_cpu(*pp);
		XFS_WANT_CORRUPTED_GOTO(mp,
			xfs_verify_fsbno(mp, bno), out_brelse);
		xfs_trans_brelse(tp, bp);
	}

	/*
	 * Here with bp and block set to the leftmost leaf node in the tree.
	 */
	i = 0;
	xfs_iext_first(ifp, &icur);

	/*
	 * Loop over all leaf nodes.  Copy information to the extent records.
	 */
	for (;;) {
		xfs_bmbt_rec_t	*frp;
		xfs_fsblock_t	nextbno;
		xfs_extnum_t	num_recs;

		num_recs = xfs_btree_get_numrecs(block);
		if (unlikely(i + num_recs > nextents)) {
			xfs_warn(ip->i_mount,
				"corrupt dinode %Lu, (btree extents).",
				(unsigned long long) ip->i_ino);
			xfs_inode_verifier_error(ip, -EFSCORRUPTED,
					__func__, block, sizeof(*block),
					__this_address);
			error = -EFSCORRUPTED;
			goto out_brelse;
		}
		/*
		 * Read-ahead the next leaf block, if any.
		 */
		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
		if (nextbno != NULLFSBLOCK)
			xfs_btree_reada_bufl(mp, nextbno, 1,
					     &xfs_bmbt_buf_ops);
		/*
		 * Copy records into the extent records.
		 */
		frp = XFS_BMBT_REC_ADDR(mp, block, 1);
		for (j = 0; j < num_recs; j++, frp++, i++) {
			xfs_failaddr_t	fa;

			xfs_bmbt_disk_get_all(frp, &new);
			fa = xfs_bmap_validate_extent(ip, whichfork, &new);
			if (fa) {
				error = -EFSCORRUPTED;
				xfs_inode_verifier_error(ip, error,
						"xfs_iread_extents(2)",
						frp, sizeof(*frp), fa);
				goto out_brelse;
			}
			xfs_iext_insert(ip, &icur, &new, state);
			trace_xfs_read_extent(ip, &icur, state, _THIS_IP_);
			xfs_iext_next(ifp, &icur);
		}
		xfs_trans_brelse(tp, bp);
		bno = nextbno;
		/*
		 * If we've reached the end, stop.
		 */
		if (bno == NULLFSBLOCK)
			break;
		error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
				XFS_BMAP_BTREE_REF, &xfs_bmbt_buf_ops);
		if (error)
			goto out;
		block = XFS_BUF_TO_BLOCK(bp);
	}

	if (i != XFS_IFORK_NEXTENTS(ip, whichfork)) {
		error = -EFSCORRUPTED;
		goto out;
	}
	ASSERT(i == xfs_iext_count(ifp));

	ifp->if_flags |= XFS_IFEXTENTS;
	return 0;

out_brelse:
	xfs_trans_brelse(tp, bp);
out:
	xfs_iext_destroy(ifp);
	return error;
}

/*
 * Returns the relative block number of the first unused block(s) in the given
 * fork with at least "len" logically contiguous blocks free. This is the
 * lowest-address hole if the fork has holes, else the first block past the end
 * of fork. Return 0 if the fork is currently local (in-inode).
 */
int						/* error */
xfs_bmap_first_unused(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_extlen_t		len,		/* size of hole to find */
	xfs_fileoff_t		*first_unused,	/* unused block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	xfs_fileoff_t		lastaddr = 0;
	xfs_fileoff_t		lowest, max;
	int			error;

	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS ||
	       XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		*first_unused = 0;
		return 0;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	lowest = max = *first_unused;
	for_each_xfs_iext(ifp, &icur, &got) {
		/*
		 * See if the hole before this extent will work.
		 */
		if (got.br_startoff >= lowest + len &&
		    got.br_startoff - max >= len)
			break;
		lastaddr = got.br_startoff + got.br_blockcount;
		max = XFS_FILEOFF_MAX(lastaddr, lowest);
	}

	*first_unused = max;
	return 0;
}
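
/*
 * Worked example: with extents [5, 10) and [40, 50) in the fork and
 * *first_unused initially 0, a request for len = 16 rejects the hole
 * at [0, 5) (too small), walks past the first extent (max becomes 10),
 * then accepts the hole starting at 10 because 40 - 10 >= 16, so the
 * loop breaks and 10 is returned in *first_unused.
 */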

/*
 * Returns the file-relative block number of the last block - 1 before
 * last_block (input value) in the file.
 * This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int						/* error */
xfs_bmap_last_before(
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_inode	*ip,		/* incore inode */
	xfs_fileoff_t		*last_block,	/* last block */
	int			whichfork)	/* data or attr fork */
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error;

	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		*last_block = 0;
		return 0;
	case XFS_DINODE_FMT_BTREE:
	case XFS_DINODE_FMT_EXTENTS:
		break;
	default:
		return -EIO;
	}

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	if (!xfs_iext_lookup_extent_before(ip, ifp, last_block, &icur, &got))
		*last_block = 0;
	return 0;
}

int
xfs_bmap_last_extent(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			whichfork,
	struct xfs_bmbt_irec	*rec,
	int			*is_empty)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_iext_cursor	icur;
	int			error;

	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		error = xfs_iread_extents(tp, ip, whichfork);
		if (error)
			return error;
	}

	xfs_iext_last(ifp, &icur);
	if (!xfs_iext_get_extent(ifp, &icur, rec))
		*is_empty = 1;
	else
		*is_empty = 0;
	return 0;
}

/*
 * Check the last inode extent to determine whether this allocation will result
 * in blocks being allocated at the end of the file. When we allocate new data
 * blocks at the end of the file which do not start at the previous data block,
 * we will try to align the new blocks at stripe unit boundaries.
 *
 * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
 * at, or past the EOF.
 */
STATIC int
xfs_bmap_isaeof(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	bma->aeof = false;
	error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
				     &is_empty);
	if (error)
		return error;

	if (is_empty) {
		bma->aeof = true;
		return 0;
	}

	/*
	 * Check if we are allocating at or past the last extent, or at least
	 * into the last delayed allocated extent.
	 */
	bma->aeof = bma->offset >= rec.br_startoff + rec.br_blockcount ||
		(bma->offset >= rec.br_startoff &&
		 isnullstartblock(rec.br_startblock));
	return 0;
}

/*
 * Returns the file-relative block number of the first block past eof in
 * the file.  This is not based on i_size, it is based on the extent records.
 * Returns 0 for local files, as they do not have extent records.
 */
int
xfs_bmap_last_offset(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*last_block,
	int			whichfork)
{
	struct xfs_bmbt_irec	rec;
	int			is_empty;
	int			error;

	*last_block = 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL)
		return 0;

	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE &&
	    XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return -EIO;

	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, &is_empty);
	if (error || is_empty)
		return error;

	*last_block = rec.br_startoff + rec.br_blockcount;
	return 0;
}

/*
 * Returns whether the selected fork of the inode has exactly one
 * block or not.  For the data fork we check this matches di_size,
 * implying the file's range is 0..bsize-1.
 */
int					/* 1=>1 block, 0=>otherwise */
xfs_bmap_one_block(
	xfs_inode_t	*ip,		/* incore inode */
	int		whichfork)	/* data or attr fork */
{
	struct xfs_ifork *ifp;		/* inode fork pointer */
	int		rval;		/* return value */
	xfs_bmbt_irec_t	s;		/* internal version of extent */
	struct xfs_iext_cursor icur;

#ifndef DEBUG
	if (whichfork == XFS_DATA_FORK)
		return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
#endif	/* !DEBUG */
	if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
		return 0;
	if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
		return 0;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_first(ifp, &icur);
	xfs_iext_get_extent(ifp, &icur, &s);
	rval = s.br_startoff == 0 && s.br_blockcount == 1;
	if (rval && whichfork == XFS_DATA_FORK)
		ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
	return rval;
}

/*
 * Extent tree manipulation functions used during allocation.
 */

/*
 * Convert a delayed allocation to a real allocation.
 */
STATIC int				/* error */
xfs_bmap_add_extent_delay_real(
	struct xfs_bmalloca	*bma,
	int			whichfork)
{
	struct xfs_bmbt_irec	*new = &bma->got;
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		da_new;	/* new count del alloc blocks used */
	xfs_filblks_t		da_old;	/* old count del alloc blocks used */
	xfs_filblks_t		temp=0;	/* value for da_new calculations */
	int			tmp_rval;	/* partial logging flags */
	struct xfs_mount	*mp;
	xfs_extnum_t		*nextents;
	struct xfs_bmbt_irec	old;

	mp = bma->ip->i_mount;
	ifp = XFS_IFORK_PTR(bma->ip, whichfork);
	ASSERT(whichfork != XFS_ATTR_FORK);
	nextents = (whichfork == XFS_COW_FORK ? &bma->ip->i_cnextents :
						&bma->ip->i_d.di_nextents);

	ASSERT(!isnullstartblock(new->br_startblock));
	ASSERT(!bma->cur ||
	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	xfs_iext_get_extent(ifp, &bma->icur, &PREV);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(isnullstartblock(PREV.br_startblock));
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	da_old = startblockval(PREV.br_startblock);
	da_new = 0;

	/*
	 * Set flags determining what part of the previous delayed allocation
	 * extent is being replaced by a real allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, &bma->icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, &bma->icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	error = 0;
	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);
		(*nextents)--;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_delete(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_btree_decrement(bma->cur, 0, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The left neighbor is contiguous, the right is not.
		 */
		old = LEFT;
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * The right neighbor is contiguous, the left is not. Take care
		 * with delay -> unwritten extent allocation here because the
		 * delalloc record we are overwriting is always written.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_remove(bma->ip, &bma->icur, state);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Filling in all of a previously delayed allocation extent.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_startblock = new->br_startblock;
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is contiguous.
		 */
		old = LEFT;
		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
				startblockval(PREV.br_startblock));

		LEFT.br_blockcount += new->br_blockcount;

		PREV.br_blockcount = temp;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_prev(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &LEFT);

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Filling in the first part of a previous delayed allocation.
		 * The left neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startoff = new_endoff;
		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_prev(ifp, &bma->icur);
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = RIGHT;
		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;

		if (bma->cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(bma->cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(bma->cur, &RIGHT);
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock));

		PREV.br_blockcount = temp;
		PREV.br_startblock = nullstartblock(da_new);

		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);
		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &RIGHT);
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Filling in the last part of a previous delayed allocation.
		 * The right neighbor is not contiguous.
		 */
		xfs_iext_update_extent(bma->ip, state, &bma->icur, new);
		(*nextents)++;
		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		temp = PREV.br_blockcount - new->br_blockcount;
		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
			startblockval(PREV.br_startblock) -
			(bma->cur ? bma->cur->bc_private.b.allocated : 0));

		PREV.br_startblock = nullstartblock(da_new);
		PREV.br_blockcount = temp;
		xfs_iext_insert(bma->ip, &bma->icur, &PREV, state);
		xfs_iext_next(ifp, &bma->icur);
		break;

	case 0:
		/*
		 * Filling in the middle part of a previous delayed allocation.
		 * Contiguity is impossible here.
		 * This case is avoided almost all the time.
		 *
		 * We start with a delayed allocation:
		 *
		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
		 *  PREV @ idx
		 *
		 * and we are allocating:
		 *                     +rrrrrrrrrrrrrrrrr+
		 *                            new
		 *
		 * and we set it up for insertion as:
		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
		 *                            new
		 *  PREV @ idx          LEFT              RIGHT
		 *                      inserted at idx + 1
		 */
		old = PREV;

		/* LEFT is the new middle */
		LEFT = *new;

		/* RIGHT is the new right */
		RIGHT.br_state = PREV.br_state;
		RIGHT.br_startoff = new_endoff;
		RIGHT.br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		RIGHT.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					RIGHT.br_blockcount));

		/* truncate PREV */
		PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
		PREV.br_startblock =
			nullstartblock(xfs_bmap_worst_indlen(bma->ip,
					PREV.br_blockcount));
		xfs_iext_update_extent(bma->ip, state, &bma->icur, &PREV);

		xfs_iext_next(ifp, &bma->icur);
		xfs_iext_insert(bma->ip, &bma->icur, &RIGHT, state);
		xfs_iext_insert(bma->ip, &bma->icur, &LEFT, state);
		(*nextents)++;

		if (bma->cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(bma->cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
			error = xfs_btree_insert(bma->cur, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}

		if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
					&bma->cur, 1, &tmp_rval, whichfork);
			rval |= tmp_rval;
			if (error)
				goto done;
		}

		da_new = startblockval(PREV.br_startblock) +
			 startblockval(RIGHT.br_startblock);
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* add reverse mapping unless caller opted out */
	if (!(bma->flags & XFS_BMAPI_NORMAP)) {
		error = xfs_rmap_map_extent(bma->tp, bma->ip, whichfork, new);
		if (error)
			goto done;
	}

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(bma->cur == NULL);
		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
				&bma->cur, da_old > 0, &tmp_logflags,
				whichfork);
		bma->logflags |= tmp_logflags;
		if (error)
			goto done;
	}

	if (bma->cur) {
		da_new += bma->cur->bc_private.b.allocated;
		bma->cur->bc_private.b.allocated = 0;
	}

	/* adjust for changes in reserved delayed indirect blocks */
	if (da_new != da_old) {
		ASSERT(state == 0 || da_new < da_old);
		error = xfs_mod_fdblocks(mp, (int64_t)(da_old - da_new),
				false);
	}

	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, whichfork);
done:
	if (whichfork != XFS_COW_FORK)
		bma->logflags |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
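
/*
 * Example of the state machinery above: suppose PREV is the delalloc
 * record [100, 200) and the new real allocation is [100, 150).  Then
 * BMAP_LEFT_FILLING is set (the start offsets match) but
 * BMAP_RIGHT_FILLING is not, so depending on whether a written left
 * neighbour ends exactly at block 100 this resolves to the
 * "BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG" or plain "BMAP_LEFT_FILLING"
 * case, and the remainder [150, 200) stays delalloc with a reduced
 * indirect-block reservation recomputed via xfs_bmap_worst_indlen().
 */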

/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 */
int					/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	int			error;	/* error return value */
	int			i;	/* temp state */
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = xfs_bmap_fork_to_state(whichfork);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	old;

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, whichfork);

	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(mp, xs_add_exlist);

#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	xfs_iext_get_extent(ifp, icur, &PREV);
	ASSERT(new->br_state != PREV.br_state);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &LEFT)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == new->br_state &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (xfs_iext_peek_next_extent(ifp, icur, &RIGHT)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    new->br_state == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		LEFT.br_blockcount += PREV.br_blockcount + RIGHT.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		LEFT.br_blockcount += PREV.br_blockcount;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &PREV, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		PREV.br_blockcount += RIGHT.br_blockcount;
		PREV.br_state = new->br_state;

		xfs_iext_next(ifp, icur);
		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &PREV);

		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &RIGHT, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		PREV.br_state = new->br_state;
		xfs_iext_update_extent(ip, state, icur, &PREV);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, new, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		LEFT.br_blockcount += new->br_blockcount;

		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &LEFT);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			error = xfs_btree_decrement(cur, 0, &i);
			if (error)
				goto done;
			error = xfs_bmbt_update(cur, &LEFT);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		old = PREV;
		PREV.br_startoff += new->br_blockcount;
		PREV.br_startblock += new->br_blockcount;
		PREV.br_blockcount -= new->br_blockcount;

		xfs_iext_update_extent(ip, state, icur, &PREV);
		xfs_iext_insert(ip, icur, new, state);
		XFS_IFORK_NEXT_SET(ip, whichfork,
				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			error = xfs_bmbt_lookup_eq(cur, &old, &i);
			if (error)
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
			error = xfs_bmbt_update(cur, &PREV);
			if (error)
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		old = PREV;
		PREV.br_blockcount -= new->br_blockcount;

		RIGHT.br_startoff = new->br_startoff;
		RIGHT.br_startblock = new->br_startblock;
		RIGHT.br_blockcount += new->br_blockcount;
2347 xfs_iext_update_extent(ip, state, icur, &PREV);
2348 xfs_iext_next(ifp, icur);
2349 xfs_iext_update_extent(ip, state, icur, &RIGHT);
2352 rval = XFS_ILOG_DEXT;
2355 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2358 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2359 error = xfs_bmbt_update(cur, &PREV);
2362 error = xfs_btree_increment(cur, 0, &i);
2365 error = xfs_bmbt_update(cur, &RIGHT);
2371 case BMAP_RIGHT_FILLING:
2373 * Setting the last part of a previous oldext extent to newext.
2374 * The right neighbor is not contiguous.
2377 PREV.br_blockcount -= new->br_blockcount;
2379 xfs_iext_update_extent(ip, state, icur, &PREV);
2380 xfs_iext_next(ifp, icur);
2381 xfs_iext_insert(ip, icur, new, state);
2383 XFS_IFORK_NEXT_SET(ip, whichfork,
2384 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2386 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2388 rval = XFS_ILOG_CORE;
2389 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2392 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2393 error = xfs_bmbt_update(cur, &PREV);
2396 error = xfs_bmbt_lookup_eq(cur, new, &i);
2399 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2400 if ((error = xfs_btree_insert(cur, &i)))
2402 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2408 * Setting the middle part of a previous oldext extent to
2409 * newext. Contiguity is impossible here.
2410 * One extent becomes three extents.
2413 PREV.br_blockcount = new->br_startoff - PREV.br_startoff;
2416 r[1].br_startoff = new_endoff;
2417 r[1].br_blockcount =
2418 old.br_startoff + old.br_blockcount - new_endoff;
2419 r[1].br_startblock = new->br_startblock + new->br_blockcount;
2420 r[1].br_state = PREV.br_state;
2422 xfs_iext_update_extent(ip, state, icur, &PREV);
2423 xfs_iext_next(ifp, icur);
2424 xfs_iext_insert(ip, icur, &r[1], state);
2425 xfs_iext_insert(ip, icur, &r[0], state);
2427 XFS_IFORK_NEXT_SET(ip, whichfork,
2428 XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
2430 rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
2432 rval = XFS_ILOG_CORE;
2433 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2436 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2437 /* new right extent - oldext */
2438 error = xfs_bmbt_update(cur, &r[1]);
2441 /* new left extent - oldext */
2442 cur->bc_rec.b = PREV;
2443 if ((error = xfs_btree_insert(cur, &i)))
2445 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2447 * Reset the cursor to the position of the new extent
2448 * we are about to insert as we can't trust it after
2449 * the previous insert.
2451 error = xfs_bmbt_lookup_eq(cur, new, &i);
2454 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2455 /* new middle extent - newext */
2456 if ((error = xfs_btree_insert(cur, &i)))
2458 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2462 case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2463 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2464 case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
2465 case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
2466 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2467 case BMAP_LEFT_CONTIG:
2468 case BMAP_RIGHT_CONTIG:
2470 * These cases are all impossible.
2475 /* update reverse mappings */
2476 error = xfs_rmap_convert_extent(mp, tp, ip, whichfork, new);
2480 /* convert to a btree if necessary */
2481 if (xfs_bmap_needs_btree(ip, whichfork)) {
2482 int tmp_logflags; /* partial log flag return val */
2484 ASSERT(cur == NULL);
2485 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
2486 &tmp_logflags, whichfork);
2487 *logflagsp |= tmp_logflags;
2492 /* clear out the allocated field, done with it now in any case. */
2494 cur->bc_private.b.allocated = 0;
2498 xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
2508 * Convert a hole to a delayed allocation.
2511 xfs_bmap_add_extent_hole_delay(
2512 xfs_inode_t *ip, /* incore inode pointer */
2514 struct xfs_iext_cursor *icur,
2515 xfs_bmbt_irec_t *new) /* new data to add to file extents */
2517 struct xfs_ifork *ifp; /* inode fork pointer */
2518 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2519 xfs_filblks_t newlen=0; /* new indirect size */
2520 xfs_filblks_t oldlen=0; /* old indirect size */
2521 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2522 int state = xfs_bmap_fork_to_state(whichfork);
2523 xfs_filblks_t temp; /* temp for indirect calculations */
2525 ifp = XFS_IFORK_PTR(ip, whichfork);
2526 ASSERT(isnullstartblock(new->br_startblock));
2529 * Check and set flags if this segment has a left neighbor
2531 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2532 state |= BMAP_LEFT_VALID;
2533 if (isnullstartblock(left.br_startblock))
2534 state |= BMAP_LEFT_DELAY;
2538 * Check and set flags if the current (right) segment exists.
2539 * If it doesn't exist, we're converting the hole at end-of-file.
2541 if (xfs_iext_get_extent(ifp, icur, &right)) {
2542 state |= BMAP_RIGHT_VALID;
2543 if (isnullstartblock(right.br_startblock))
2544 state |= BMAP_RIGHT_DELAY;
2548 * Set contiguity flags on the left and right neighbors.
2549 * Don't let extents get too large, even if the pieces are contiguous.
2551 if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
2552 left.br_startoff + left.br_blockcount == new->br_startoff &&
2553 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2554 state |= BMAP_LEFT_CONTIG;
2556 if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
2557 new->br_startoff + new->br_blockcount == right.br_startoff &&
2558 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2559 (!(state & BMAP_LEFT_CONTIG) ||
2560 (left.br_blockcount + new->br_blockcount +
2561 right.br_blockcount <= MAXEXTLEN)))
2562 state |= BMAP_RIGHT_CONTIG;
2565 * Switch out based on the contiguity flags.
2567 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2568 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2570 * New allocation is contiguous with delayed allocations
2571 * on the left and on the right.
2572 * Merge all three into a single extent record.
2574 temp = left.br_blockcount + new->br_blockcount +
2575 right.br_blockcount;
2577 oldlen = startblockval(left.br_startblock) +
2578 startblockval(new->br_startblock) +
2579 startblockval(right.br_startblock);
2580 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2582 left.br_startblock = nullstartblock(newlen);
2583 left.br_blockcount = temp;
2585 xfs_iext_remove(ip, icur, state);
2586 xfs_iext_prev(ifp, icur);
2587 xfs_iext_update_extent(ip, state, icur, &left);
2590 case BMAP_LEFT_CONTIG:
2592 * New allocation is contiguous with a delayed allocation
2594 * Merge the new allocation with the left neighbor.
2596 temp = left.br_blockcount + new->br_blockcount;
2598 oldlen = startblockval(left.br_startblock) +
2599 startblockval(new->br_startblock);
2600 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2602 left.br_blockcount = temp;
2603 left.br_startblock = nullstartblock(newlen);
2605 xfs_iext_prev(ifp, icur);
2606 xfs_iext_update_extent(ip, state, icur, &left);
2609 case BMAP_RIGHT_CONTIG:
2611 * New allocation is contiguous with a delayed allocation
2613 * Merge the new allocation with the right neighbor.
2615 temp = new->br_blockcount + right.br_blockcount;
2616 oldlen = startblockval(new->br_startblock) +
2617 startblockval(right.br_startblock);
2618 newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
2620 right.br_startoff = new->br_startoff;
2621 right.br_startblock = nullstartblock(newlen);
2622 right.br_blockcount = temp;
2623 xfs_iext_update_extent(ip, state, icur, &right);
2628 * New allocation is not contiguous with another
2629 * delayed allocation.
2630 * Insert a new entry.
2632 oldlen = newlen = 0;
2633 xfs_iext_insert(ip, icur, new, state);
2636 if (oldlen != newlen) {
2637 ASSERT(oldlen > newlen);
2638 xfs_mod_fdblocks(ip->i_mount, (int64_t)(oldlen - newlen),
2641 * Nothing to do for disk quota accounting here.
2647 * Convert a hole to a real allocation.
2649 STATIC int /* error */
2650 xfs_bmap_add_extent_hole_real(
2651 struct xfs_trans *tp,
2652 struct xfs_inode *ip,
2654 struct xfs_iext_cursor *icur,
2655 struct xfs_btree_cur **curp,
2656 struct xfs_bmbt_irec *new,
2660 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
2661 struct xfs_mount *mp = ip->i_mount;
2662 struct xfs_btree_cur *cur = *curp;
2663 int error; /* error return value */
2664 int i; /* temp state */
2665 xfs_bmbt_irec_t left; /* left neighbor extent entry */
2666 xfs_bmbt_irec_t right; /* right neighbor extent entry */
2667 int rval=0; /* return value (logging flags) */
2668 int state = xfs_bmap_fork_to_state(whichfork);
2669 struct xfs_bmbt_irec old;
2671 ASSERT(!isnullstartblock(new->br_startblock));
2672 ASSERT(!cur || !(cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
2674 XFS_STATS_INC(mp, xs_add_exlist);
2677 * Check and set flags if this segment has a left neighbor.
2679 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
2680 state |= BMAP_LEFT_VALID;
2681 if (isnullstartblock(left.br_startblock))
2682 state |= BMAP_LEFT_DELAY;
2686 * Check and set flags if this segment has a current value.
2687 * Not true if we're inserting into the "hole" at eof.
2689 if (xfs_iext_get_extent(ifp, icur, &right)) {
2690 state |= BMAP_RIGHT_VALID;
2691 if (isnullstartblock(right.br_startblock))
2692 state |= BMAP_RIGHT_DELAY;
2696 * We're inserting a real allocation between "left" and "right".
2697 * Set the contiguity flags. Don't let extents get too large.
2699 if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
2700 left.br_startoff + left.br_blockcount == new->br_startoff &&
2701 left.br_startblock + left.br_blockcount == new->br_startblock &&
2702 left.br_state == new->br_state &&
2703 left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
2704 state |= BMAP_LEFT_CONTIG;
2706 if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
2707 new->br_startoff + new->br_blockcount == right.br_startoff &&
2708 new->br_startblock + new->br_blockcount == right.br_startblock &&
2709 new->br_state == right.br_state &&
2710 new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
2711 (!(state & BMAP_LEFT_CONTIG) ||
2712 left.br_blockcount + new->br_blockcount +
2713 right.br_blockcount <= MAXEXTLEN))
2714 state |= BMAP_RIGHT_CONTIG;
2718 * Select which case we're in here, and implement it.
2720 switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
2721 case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
2723 * New allocation is contiguous with real allocations on the
2724 * left and on the right.
2725 * Merge all three into a single extent record.
2727 left.br_blockcount += new->br_blockcount + right.br_blockcount;
2729 xfs_iext_remove(ip, icur, state);
2730 xfs_iext_prev(ifp, icur);
2731 xfs_iext_update_extent(ip, state, icur, &left);
2733 XFS_IFORK_NEXT_SET(ip, whichfork,
2734 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
2736 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2738 rval = XFS_ILOG_CORE;
2739 error = xfs_bmbt_lookup_eq(cur, &right, &i);
2742 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2743 error = xfs_btree_delete(cur, &i);
2746 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2747 error = xfs_btree_decrement(cur, 0, &i);
2750 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2751 error = xfs_bmbt_update(cur, &left);
2757 case BMAP_LEFT_CONTIG:
2759 * New allocation is contiguous with a real allocation
2761 * Merge the new allocation with the left neighbor.
2764 left.br_blockcount += new->br_blockcount;
2766 xfs_iext_prev(ifp, icur);
2767 xfs_iext_update_extent(ip, state, icur, &left);
2770 rval = xfs_ilog_fext(whichfork);
2773 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2776 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2777 error = xfs_bmbt_update(cur, &left);
2783 case BMAP_RIGHT_CONTIG:
2785 * New allocation is contiguous with a real allocation
2787 * Merge the new allocation with the right neighbor.
2791 right.br_startoff = new->br_startoff;
2792 right.br_startblock = new->br_startblock;
2793 right.br_blockcount += new->br_blockcount;
2794 xfs_iext_update_extent(ip, state, icur, &right);
2797 rval = xfs_ilog_fext(whichfork);
2800 error = xfs_bmbt_lookup_eq(cur, &old, &i);
2803 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2804 error = xfs_bmbt_update(cur, &right);
2812 * New allocation is not contiguous with another
2814 * Insert a new entry.
2816 xfs_iext_insert(ip, icur, new, state);
2817 XFS_IFORK_NEXT_SET(ip, whichfork,
2818 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
2820 rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
2822 rval = XFS_ILOG_CORE;
2823 error = xfs_bmbt_lookup_eq(cur, new, &i);
2826 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
2827 error = xfs_btree_insert(cur, &i);
2830 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
2835 /* add reverse mapping unless caller opted out */
2836 if (!(flags & XFS_BMAPI_NORMAP)) {
2837 error = xfs_rmap_map_extent(tp, ip, whichfork, new);
2842 /* convert to a btree if necessary */
2843 if (xfs_bmap_needs_btree(ip, whichfork)) {
2844 int tmp_logflags; /* partial log flag return val */
2846 ASSERT(cur == NULL);
2847 error = xfs_bmap_extents_to_btree(tp, ip, curp, 0,
2848 &tmp_logflags, whichfork);
2849 *logflagsp |= tmp_logflags;
2855 /* clear out the allocated field, done with it now in any case. */
2857 cur->bc_private.b.allocated = 0;
2859 xfs_bmap_check_leaf_extents(cur, ip, whichfork);
2866 * Functions used in the extent read, allocate and remove paths
2870 * Adjust the size of the new extent based on di_extsize and rt extsize.
2873 xfs_bmap_extsize_align(
2875 xfs_bmbt_irec_t *gotp, /* next extent pointer */
2876 xfs_bmbt_irec_t *prevp, /* previous extent pointer */
2877 xfs_extlen_t extsz, /* align to this extent size */
2878 int rt, /* is this a realtime inode? */
2879 int eof, /* is extent at end-of-file? */
2880 int delay, /* creating delalloc extent? */
2881 int convert, /* overwriting unwritten extent? */
2882 xfs_fileoff_t *offp, /* in/out: aligned offset */
2883 xfs_extlen_t *lenp) /* in/out: aligned length */
2885 xfs_fileoff_t orig_off; /* original offset */
2886 xfs_extlen_t orig_alen; /* original length */
2887 xfs_fileoff_t orig_end; /* original off+len */
2888 xfs_fileoff_t nexto; /* next file offset */
2889 xfs_fileoff_t prevo; /* previous file offset */
2890 xfs_fileoff_t align_off; /* temp for offset */
2891 xfs_extlen_t align_alen; /* temp for length */
2892 xfs_extlen_t temp; /* temp for calculations */
2897 orig_off = align_off = *offp;
2898 orig_alen = align_alen = *lenp;
2899 orig_end = orig_off + orig_alen;
2902 * If this request overlaps an existing extent, then don't
2903 * attempt to perform any additional alignment.
2905 if (!delay && !eof &&
2906 (orig_off >= gotp->br_startoff) &&
2907 (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
2912 * If the file offset is unaligned vs. the extent size
2913 * we need to align it. This will be possible unless
2914 * the file was previously written with a kernel that didn't
2915 * perform this alignment, or if a truncate shot us in the
2918 div_u64_rem(orig_off, extsz, &temp);
2924 /* Same adjustment for the end of the requested area. */
2925 temp = (align_alen % extsz);
2927 align_alen += extsz - temp;
2930 * For large extent hint sizes, the aligned extent might be larger than
2931 * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
2932 * the length back under MAXEXTLEN. The outer allocation loops handle
2933 * short allocation just fine, so it is safe to do this. We only want to
2934 * do it when we are forced to, though, because it means more allocation
2935 * operations are required.
2937 while (align_alen > MAXEXTLEN)
2938 align_alen -= extsz;
2939 ASSERT(align_alen <= MAXEXTLEN);
2942 * If the previous block overlaps with this proposed allocation
2943 * then move the start forward without adjusting the length.
2945 if (prevp->br_startoff != NULLFILEOFF) {
2946 if (prevp->br_startblock == HOLESTARTBLOCK)
2947 prevo = prevp->br_startoff;
2949 prevo = prevp->br_startoff + prevp->br_blockcount;
2952 if (align_off != orig_off && align_off < prevo)
2955 * If the next block overlaps with this proposed allocation
2956 * then move the start back without adjusting the length,
2957 * but not before offset 0.
2958 * This may of course make the start overlap previous block,
2959 * and if we hit the offset 0 limit then the next block
2960 * can still overlap too.
2962 if (!eof && gotp->br_startoff != NULLFILEOFF) {
2963 if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
2964 (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
2965 nexto = gotp->br_startoff + gotp->br_blockcount;
2967 nexto = gotp->br_startoff;
2969 nexto = NULLFILEOFF;
2971 align_off + align_alen != orig_end &&
2972 align_off + align_alen > nexto)
2973 align_off = nexto > align_alen ? nexto - align_alen : 0;
2975 * If we're now overlapping the next or previous extent that
2976 * means we can't fit an extsz piece in this hole. Just move
2977 * the start forward to the first valid spot and set
2978 * the length so we hit the end.
2980 if (align_off != orig_off && align_off < prevo)
2982 if (align_off + align_alen != orig_end &&
2983 align_off + align_alen > nexto &&
2984 nexto != NULLFILEOFF) {
2985 ASSERT(nexto > prevo);
2986 align_alen = nexto - align_off;
2990 * If realtime, and the result isn't a multiple of the realtime
2991 * extent size we need to remove blocks until it is.
2993 if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
2995 * We're not covering the original request, or
2996 * we won't be able to once we fix the length.
2998 if (orig_off < align_off ||
2999 orig_end > align_off + align_alen ||
3000 align_alen - temp < orig_alen)
3003 * Try to fix it by moving the start up.
3005 if (align_off + temp <= orig_off) {
3010 * Try to fix it by moving the end in.
3012 else if (align_off + align_alen - temp >= orig_end)
3015 * Set the start to the minimum then trim the length.
3018 align_alen -= orig_off - align_off;
3019 align_off = orig_off;
3020 align_alen -= align_alen % mp->m_sb.sb_rextsize;
3023 * Result doesn't cover the request, fail it.
3025 if (orig_off < align_off || orig_end > align_off + align_alen)
3028 ASSERT(orig_off >= align_off);
3029 /* see MAXEXTLEN handling above */
3030 ASSERT(orig_end <= align_off + align_alen ||
3031 align_alen + extsz > MAXEXTLEN);
3035 if (!eof && gotp->br_startoff != NULLFILEOFF)
3036 ASSERT(align_off + align_alen <= gotp->br_startoff);
3037 if (prevp->br_startoff != NULLFILEOFF)
3038 ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
3046 #define XFS_ALLOC_GAP_UNITS 4
3050 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3052 xfs_fsblock_t adjust; /* adjustment to block numbers */
3053 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3054 xfs_mount_t *mp; /* mount point structure */
3055 int nullfb; /* true if ap->firstblock isn't set */
3056 int rt; /* true if inode is realtime */
3058 #define ISVALID(x,y) \
3060 (x) < mp->m_sb.sb_rblocks : \
3061 XFS_FSB_TO_AGNO(mp, x) == XFS_FSB_TO_AGNO(mp, y) && \
3062 XFS_FSB_TO_AGNO(mp, x) < mp->m_sb.sb_agcount && \
3063 XFS_FSB_TO_AGBNO(mp, x) < mp->m_sb.sb_agblocks)
3065 mp = ap->ip->i_mount;
3066 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3067 rt = XFS_IS_REALTIME_INODE(ap->ip) &&
3068 xfs_alloc_is_userdata(ap->datatype);
3069 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3070 ap->tp->t_firstblock);
3072 * If allocating at eof, and there's a previous real block,
3073 * try to use its last block as our starting point.
3075 if (ap->eof && ap->prev.br_startoff != NULLFILEOFF &&
3076 !isnullstartblock(ap->prev.br_startblock) &&
3077 ISVALID(ap->prev.br_startblock + ap->prev.br_blockcount,
3078 ap->prev.br_startblock)) {
3079 ap->blkno = ap->prev.br_startblock + ap->prev.br_blockcount;
3081 * Adjust for the gap between prevp and us.
3083 adjust = ap->offset -
3084 (ap->prev.br_startoff + ap->prev.br_blockcount);
3086 ISVALID(ap->blkno + adjust, ap->prev.br_startblock))
3087 ap->blkno += adjust;
3090 * If not at eof, then compare the two neighbor blocks.
3091 * Figure out whether either one gives us a good starting point,
3092 * and pick the better one.
3094 else if (!ap->eof) {
3095 xfs_fsblock_t gotbno; /* right side block number */
3096 xfs_fsblock_t gotdiff=0; /* right side difference */
3097 xfs_fsblock_t prevbno; /* left side block number */
3098 xfs_fsblock_t prevdiff=0; /* left side difference */
3101 * If there's a previous (left) block, select a requested
3102 * start block based on it.
3104 if (ap->prev.br_startoff != NULLFILEOFF &&
3105 !isnullstartblock(ap->prev.br_startblock) &&
3106 (prevbno = ap->prev.br_startblock +
3107 ap->prev.br_blockcount) &&
3108 ISVALID(prevbno, ap->prev.br_startblock)) {
3110 * Calculate gap to end of previous block.
3112 adjust = prevdiff = ap->offset -
3113 (ap->prev.br_startoff +
3114 ap->prev.br_blockcount);
3116 * Figure the startblock based on the previous block's
3117 * end and the gap size.
3119 * If the gap is large relative to the piece we're
3120 * allocating, or using it gives us an invalid block
3121 * number, then just use the end of the previous block.
3123 if (prevdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3124 ISVALID(prevbno + prevdiff,
3125 ap->prev.br_startblock))
3130 * If the firstblock forbids it, can't use it,
3133 if (!rt && !nullfb &&
3134 XFS_FSB_TO_AGNO(mp, prevbno) != fb_agno)
3135 prevbno = NULLFSBLOCK;
3138 * No previous block or can't follow it, just default.
3141 prevbno = NULLFSBLOCK;
3143 * If there's a following (right) block, select a requested
3144 * start block based on it.
3146 if (!isnullstartblock(ap->got.br_startblock)) {
3148 * Calculate gap to start of next block.
3150 adjust = gotdiff = ap->got.br_startoff - ap->offset;
3152 * Figure the startblock based on the next block's
3153 * start and the gap size.
3155 gotbno = ap->got.br_startblock;
3158 * If the gap is large relative to the piece we're
3159 * allocating, or using it gives us an invalid block
3160 * number, then just use the start of the next block
3161 * offset by our length.
3163 if (gotdiff <= XFS_ALLOC_GAP_UNITS * ap->length &&
3164 ISVALID(gotbno - gotdiff, gotbno))
3166 else if (ISVALID(gotbno - ap->length, gotbno)) {
3167 gotbno -= ap->length;
3168 gotdiff += adjust - ap->length;
3172 * If the firstblock forbids it, can't use it,
3175 if (!rt && !nullfb &&
3176 XFS_FSB_TO_AGNO(mp, gotbno) != fb_agno)
3177 gotbno = NULLFSBLOCK;
3180 * No next block, just default.
3183 gotbno = NULLFSBLOCK;
3185 * If both valid, pick the better one, else the only good
3186 * one, else ap->blkno is already set (to 0 or the inode block).
3188 if (prevbno != NULLFSBLOCK && gotbno != NULLFSBLOCK)
3189 ap->blkno = prevdiff <= gotdiff ? prevbno : gotbno;
3190 else if (prevbno != NULLFSBLOCK)
3191 ap->blkno = prevbno;
3192 else if (gotbno != NULLFSBLOCK)
3199 xfs_bmap_longest_free_extent(
3200 struct xfs_trans *tp,
3205 struct xfs_mount *mp = tp->t_mountp;
3206 struct xfs_perag *pag;
3207 xfs_extlen_t longest;
3210 pag = xfs_perag_get(mp, ag);
3211 if (!pag->pagf_init) {
3212 error = xfs_alloc_pagf_init(mp, tp, ag, XFS_ALLOC_FLAG_TRYLOCK);
3216 if (!pag->pagf_init) {
3222 longest = xfs_alloc_longest_free_extent(pag,
3223 xfs_alloc_min_freelist(mp, pag),
3224 xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
3225 if (*blen < longest)
3234 xfs_bmap_select_minlen(
3235 struct xfs_bmalloca *ap,
3236 struct xfs_alloc_arg *args,
3240 if (notinit || *blen < ap->minlen) {
3242 * Since we did a BUF_TRYLOCK above, it is possible that
3243 * there is space for this request.
3245 args->minlen = ap->minlen;
3246 } else if (*blen < args->maxlen) {
3248 * If the best seen length is less than the request length,
3249 * use the best as the minimum.
3251 args->minlen = *blen;
3254 * Otherwise we've seen an extent as big as maxlen, use that
3257 args->minlen = args->maxlen;
3262 xfs_bmap_btalloc_nullfb(
3263 struct xfs_bmalloca *ap,
3264 struct xfs_alloc_arg *args,
3267 struct xfs_mount *mp = ap->ip->i_mount;
3268 xfs_agnumber_t ag, startag;
3272 args->type = XFS_ALLOCTYPE_START_BNO;
3273 args->total = ap->total;
3275 startag = ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3276 if (startag == NULLAGNUMBER)
3279 while (*blen < args->maxlen) {
3280 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3285 if (++ag == mp->m_sb.sb_agcount)
3291 xfs_bmap_select_minlen(ap, args, blen, notinit);
3296 xfs_bmap_btalloc_filestreams(
3297 struct xfs_bmalloca *ap,
3298 struct xfs_alloc_arg *args,
3301 struct xfs_mount *mp = ap->ip->i_mount;
3306 args->type = XFS_ALLOCTYPE_NEAR_BNO;
3307 args->total = ap->total;
3309 ag = XFS_FSB_TO_AGNO(mp, args->fsbno);
3310 if (ag == NULLAGNUMBER)
3313 error = xfs_bmap_longest_free_extent(args->tp, ag, blen, ¬init);
3317 if (*blen < args->maxlen) {
3318 error = xfs_filestream_new_ag(ap, &ag);
3322 error = xfs_bmap_longest_free_extent(args->tp, ag, blen,
3329 xfs_bmap_select_minlen(ap, args, blen, notinit);
3332 * Set the failure fallback case to look in the selected AG as stream
3335 ap->blkno = args->fsbno = XFS_AGB_TO_FSB(mp, ag, 0);
3339 /* Update all inode and quota accounting for the allocation we just did. */
3341 xfs_bmap_btalloc_accounting(
3342 struct xfs_bmalloca *ap,
3343 struct xfs_alloc_arg *args)
3345 if (ap->flags & XFS_BMAPI_COWFORK) {
3347 * COW fork blocks are in-core only and thus are treated as
3348 * in-core quota reservation (like delalloc blocks) even when
3349 * converted to real blocks. The quota reservation is not
3350 * accounted to disk until blocks are remapped to the data
3351 * fork. So if these blocks were previously delalloc, we
3352 * already have quota reservation and there's nothing to do
3359 * Otherwise, we've allocated blocks in a hole. The transaction
3360 * has acquired in-core quota reservation for this extent.
3361 * Rather than account these as real blocks, however, we reduce
3362 * the transaction quota reservation based on the allocation.
3363 * This essentially transfers the transaction quota reservation
3364 * to that of a delalloc extent.
3366 ap->ip->i_delayed_blks += args->len;
3367 xfs_trans_mod_dquot_byino(ap->tp, ap->ip, XFS_TRANS_DQ_RES_BLKS,
3372 /* data/attr fork only */
3373 ap->ip->i_d.di_nblocks += args->len;
3374 xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
3376 ap->ip->i_delayed_blks -= args->len;
3377 xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
3378 ap->wasdel ? XFS_TRANS_DQ_DELBCOUNT : XFS_TRANS_DQ_BCOUNT,
3384 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3386 xfs_mount_t *mp; /* mount point structure */
3387 xfs_alloctype_t atype = 0; /* type for allocation routines */
3388 xfs_extlen_t align = 0; /* minimum allocation alignment */
3389 xfs_agnumber_t fb_agno; /* ag number of ap->firstblock */
3391 xfs_alloc_arg_t args;
3392 xfs_fileoff_t orig_offset;
3393 xfs_extlen_t orig_length;
3395 xfs_extlen_t nextminlen = 0;
3396 int nullfb; /* true if ap->firstblock isn't set */
3403 orig_offset = ap->offset;
3404 orig_length = ap->length;
3406 mp = ap->ip->i_mount;
3408 /* stripe alignment for allocation is determined by mount parameters */
3410 if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
3411 stripe_align = mp->m_swidth;
3412 else if (mp->m_dalign)
3413 stripe_align = mp->m_dalign;
3415 if (ap->flags & XFS_BMAPI_COWFORK)
3416 align = xfs_get_cowextsz_hint(ap->ip);
3417 else if (xfs_alloc_is_userdata(ap->datatype))
3418 align = xfs_get_extsz_hint(ap->ip);
3420 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
3421 align, 0, ap->eof, 0, ap->conv,
3422 &ap->offset, &ap->length);
3428 nullfb = ap->tp->t_firstblock == NULLFSBLOCK;
3429 fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp,
3430 ap->tp->t_firstblock);
3432 if (xfs_alloc_is_userdata(ap->datatype) &&
3433 xfs_inode_is_filestream(ap->ip)) {
3434 ag = xfs_filestream_lookup_ag(ap->ip);
3435 ag = (ag != NULLAGNUMBER) ? ag : 0;
3436 ap->blkno = XFS_AGB_TO_FSB(mp, ag, 0);
3438 ap->blkno = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
3441 ap->blkno = ap->tp->t_firstblock;
3443 xfs_bmap_adjacent(ap);
3446 * If allowed, use ap->blkno; otherwise must use firstblock since
3447 * it's in the right allocation group.
3449 if (nullfb || XFS_FSB_TO_AGNO(mp, ap->blkno) == fb_agno)
3452 ap->blkno = ap->tp->t_firstblock;
3454 * Normal allocation, done through xfs_alloc_vextent.
3456 tryagain = isaligned = 0;
3457 memset(&args, 0, sizeof(args));
3460 args.fsbno = ap->blkno;
3461 args.oinfo = XFS_RMAP_OINFO_SKIP_UPDATE;
3463 /* Trim the allocation back to the maximum an AG can fit. */
3464 args.maxlen = min(ap->length, mp->m_ag_max_usable);
3468 * Search for an allocation group with a single extent large
3469 * enough for the request. If one isn't found, then adjust
3470 * the minimum allocation size to the largest space found.
3472 if (xfs_alloc_is_userdata(ap->datatype) &&
3473 xfs_inode_is_filestream(ap->ip))
3474 error = xfs_bmap_btalloc_filestreams(ap, &args, &blen);
3476 error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
3479 } else if (ap->tp->t_flags & XFS_TRANS_LOWMODE) {
3480 if (xfs_inode_is_filestream(ap->ip))
3481 args.type = XFS_ALLOCTYPE_FIRST_AG;
3483 args.type = XFS_ALLOCTYPE_START_BNO;
3484 args.total = args.minlen = ap->minlen;
3486 args.type = XFS_ALLOCTYPE_NEAR_BNO;
3487 args.total = ap->total;
3488 args.minlen = ap->minlen;
3490 /* apply extent size hints if obtained earlier */
3493 div_u64_rem(ap->offset, args.prod, &args.mod);
3495 args.mod = args.prod - args.mod;
3496 } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) {
3500 args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog;
3501 div_u64_rem(ap->offset, args.prod, &args.mod);
3503 args.mod = args.prod - args.mod;
3506 * If we are not low on available data blocks, and the
3507 * underlying logical volume manager is a stripe, and
3508 * the file offset is zero then try to allocate data
3509 * blocks on stripe unit boundary.
3510 * NOTE: ap->aeof is only set if the allocation length
3511 * is >= the stripe unit and the allocation offset is
3512 * at the end of file.
3514 if (!(ap->tp->t_flags & XFS_TRANS_LOWMODE) && ap->aeof) {
3516 args.alignment = stripe_align;
3520 * Adjust for alignment
3522 if (blen > args.alignment && blen <= args.maxlen)
3523 args.minlen = blen - args.alignment;
3524 args.minalignslop = 0;
3527 * First try an exact bno allocation.
3528 * If it fails then do a near or start bno
3529 * allocation with alignment turned on.
3533 args.type = XFS_ALLOCTYPE_THIS_BNO;
3536 * Compute the minlen+alignment for the
3537 * next case. Set slop so that the value
3538 * of minlen+alignment+slop doesn't go up
3539 * between the calls.
3541 if (blen > stripe_align && blen <= args.maxlen)
3542 nextminlen = blen - stripe_align;
3544 nextminlen = args.minlen;
3545 if (nextminlen + stripe_align > args.minlen + 1)
3547 nextminlen + stripe_align -
3550 args.minalignslop = 0;
3554 args.minalignslop = 0;
3556 args.minleft = ap->minleft;
3557 args.wasdel = ap->wasdel;
3558 args.resv = XFS_AG_RESV_NONE;
3559 args.datatype = ap->datatype;
3560 if (ap->datatype & XFS_ALLOC_USERDATA_ZERO)
3563 error = xfs_alloc_vextent(&args);
3567 if (tryagain && args.fsbno == NULLFSBLOCK) {
3569 * Exact allocation failed. Now try with alignment
3573 args.fsbno = ap->blkno;
3574 args.alignment = stripe_align;
3575 args.minlen = nextminlen;
3576 args.minalignslop = 0;
3578 if ((error = xfs_alloc_vextent(&args)))
3581 if (isaligned && args.fsbno == NULLFSBLOCK) {
3583 * allocation failed, so turn off alignment and
3587 args.fsbno = ap->blkno;
3589 if ((error = xfs_alloc_vextent(&args)))
3592 if (args.fsbno == NULLFSBLOCK && nullfb &&
3593 args.minlen > ap->minlen) {
3594 args.minlen = ap->minlen;
3595 args.type = XFS_ALLOCTYPE_START_BNO;
3596 args.fsbno = ap->blkno;
3597 if ((error = xfs_alloc_vextent(&args)))
3600 if (args.fsbno == NULLFSBLOCK && nullfb) {
3602 args.type = XFS_ALLOCTYPE_FIRST_AG;
3603 args.total = ap->minlen;
3604 if ((error = xfs_alloc_vextent(&args)))
3606 ap->tp->t_flags |= XFS_TRANS_LOWMODE;
3608 if (args.fsbno != NULLFSBLOCK) {
3610 * check the allocation happened at the same or higher AG than
3611 * the first block that was allocated.
3613 ASSERT(ap->tp->t_firstblock == NULLFSBLOCK ||
3614 XFS_FSB_TO_AGNO(mp, ap->tp->t_firstblock) <=
3615 XFS_FSB_TO_AGNO(mp, args.fsbno));
3617 ap->blkno = args.fsbno;
3618 if (ap->tp->t_firstblock == NULLFSBLOCK)
3619 ap->tp->t_firstblock = args.fsbno;
3620 ASSERT(nullfb || fb_agno <= args.agno);
3621 ap->length = args.len;
3623 * If the extent size hint is active, we tried to round the
3624 * caller's allocation request offset down to extsz and the
3625 * length up to another extsz boundary. If we found a free
3626 * extent we mapped it in starting at this new offset. If the
3627 * newly mapped space isn't long enough to cover any of the
3628 * range of offsets that was originally requested, move the
3629 * mapping up so that we can fill as much of the caller's
3630 * original request as possible. Free space is apparently
3631 * very fragmented so we're unlikely to be able to satisfy the
3634 if (ap->length <= orig_length)
3635 ap->offset = orig_offset;
3636 else if (ap->offset + ap->length < orig_offset + orig_length)
3637 ap->offset = orig_offset + orig_length - ap->length;
3638 xfs_bmap_btalloc_accounting(ap, &args);
3640 ap->blkno = NULLFSBLOCK;
3647 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
3648 * It figures out where to ask the underlying allocator to put the new extent.
3652 struct xfs_bmalloca *ap) /* bmap alloc argument struct */
3654 if (XFS_IS_REALTIME_INODE(ap->ip) &&
3655 xfs_alloc_is_userdata(ap->datatype))
3656 return xfs_bmap_rtalloc(ap);
3657 return xfs_bmap_btalloc(ap);
3660 /* Trim extent to fit a logical block range. */
3663 struct xfs_bmbt_irec *irec,
3667 xfs_fileoff_t distance;
3668 xfs_fileoff_t end = bno + len;
3670 if (irec->br_startoff + irec->br_blockcount <= bno ||
3671 irec->br_startoff >= end) {
3672 irec->br_blockcount = 0;
3676 if (irec->br_startoff < bno) {
3677 distance = bno - irec->br_startoff;
3678 if (isnullstartblock(irec->br_startblock))
3679 irec->br_startblock = DELAYSTARTBLOCK;
3680 if (irec->br_startblock != DELAYSTARTBLOCK &&
3681 irec->br_startblock != HOLESTARTBLOCK)
3682 irec->br_startblock += distance;
3683 irec->br_startoff += distance;
3684 irec->br_blockcount -= distance;
3687 if (end < irec->br_startoff + irec->br_blockcount) {
3688 distance = irec->br_startoff + irec->br_blockcount - end;
3689 irec->br_blockcount -= distance;
3694 * Trim the returned map to the required bounds
3698 struct xfs_bmbt_irec *mval,
3699 struct xfs_bmbt_irec *got,
3707 if ((flags & XFS_BMAPI_ENTIRE) ||
3708 got->br_startoff + got->br_blockcount <= obno) {
3710 if (isnullstartblock(got->br_startblock))
3711 mval->br_startblock = DELAYSTARTBLOCK;
3717 ASSERT((*bno >= obno) || (n == 0));
3719 mval->br_startoff = *bno;
3720 if (isnullstartblock(got->br_startblock))
3721 mval->br_startblock = DELAYSTARTBLOCK;
3723 mval->br_startblock = got->br_startblock +
3724 (*bno - got->br_startoff);
3726 * Return the minimum of what we got and what we asked for for
3727 * the length. We can use the len variable here because it is
3728 * modified below and we could have been there before coming
3729 * here if the first part of the allocation didn't overlap what
3732 mval->br_blockcount = XFS_FILBLKS_MIN(end - *bno,
3733 got->br_blockcount - (*bno - got->br_startoff));
3734 mval->br_state = got->br_state;
3735 ASSERT(mval->br_blockcount <= len);
3740 * Update and validate the extent map to return
3743 xfs_bmapi_update_map(
3744 struct xfs_bmbt_irec **map,
3752 xfs_bmbt_irec_t *mval = *map;
3754 ASSERT((flags & XFS_BMAPI_ENTIRE) ||
3755 ((mval->br_startoff + mval->br_blockcount) <= end));
3756 ASSERT((flags & XFS_BMAPI_ENTIRE) || (mval->br_blockcount <= *len) ||
3757 (mval->br_startoff < obno));
3759 *bno = mval->br_startoff + mval->br_blockcount;
3761 if (*n > 0 && mval->br_startoff == mval[-1].br_startoff) {
3762 /* update previous map with new information */
3763 ASSERT(mval->br_startblock == mval[-1].br_startblock);
3764 ASSERT(mval->br_blockcount > mval[-1].br_blockcount);
3765 ASSERT(mval->br_state == mval[-1].br_state);
3766 mval[-1].br_blockcount = mval->br_blockcount;
3767 mval[-1].br_state = mval->br_state;
3768 } else if (*n > 0 && mval->br_startblock != DELAYSTARTBLOCK &&
3769 mval[-1].br_startblock != DELAYSTARTBLOCK &&
3770 mval[-1].br_startblock != HOLESTARTBLOCK &&
3771 mval->br_startblock == mval[-1].br_startblock +
3772 mval[-1].br_blockcount &&
3773 mval[-1].br_state == mval->br_state) {
3774 ASSERT(mval->br_startoff ==
3775 mval[-1].br_startoff + mval[-1].br_blockcount);
3776 mval[-1].br_blockcount += mval->br_blockcount;
3777 } else if (*n > 0 &&
3778 mval->br_startblock == DELAYSTARTBLOCK &&
3779 mval[-1].br_startblock == DELAYSTARTBLOCK &&
3780 mval->br_startoff ==
3781 mval[-1].br_startoff + mval[-1].br_blockcount) {
3782 mval[-1].br_blockcount += mval->br_blockcount;
3783 mval[-1].br_state = mval->br_state;
3784 } else if (!((*n == 0) &&
3785 ((mval->br_startoff + mval->br_blockcount) <=
3794 * Map file blocks to filesystem blocks without allocation.
3798 struct xfs_inode *ip,
3801 struct xfs_bmbt_irec *mval,
3805 struct xfs_mount *mp = ip->i_mount;
3806 struct xfs_ifork *ifp;
3807 struct xfs_bmbt_irec got;
3810 struct xfs_iext_cursor icur;
3814 int whichfork = xfs_bmapi_whichfork(flags);
3817 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK|XFS_BMAPI_ENTIRE|
3818 XFS_BMAPI_COWFORK)));
3819 ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED|XFS_ILOCK_EXCL));
3821 if (unlikely(XFS_TEST_ERROR(
3822 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
3823 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
3824 mp, XFS_ERRTAG_BMAPIFORMAT))) {
3825 XFS_ERROR_REPORT("xfs_bmapi_read", XFS_ERRLEVEL_LOW, mp);
3826 return -EFSCORRUPTED;
3829 if (XFS_FORCED_SHUTDOWN(mp))
3832 XFS_STATS_INC(mp, xs_blk_mapr);
3834 ifp = XFS_IFORK_PTR(ip, whichfork);
3836 /* No CoW fork? Return a hole. */
3837 if (whichfork == XFS_COW_FORK && !ifp) {
3838 mval->br_startoff = bno;
3839 mval->br_startblock = HOLESTARTBLOCK;
3840 mval->br_blockcount = len;
3841 mval->br_state = XFS_EXT_NORM;
3846 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
3847 error = xfs_iread_extents(NULL, ip, whichfork);
3852 if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got))
3857 while (bno < end && n < *nmap) {
3858 /* Reading past eof, act as though there's a hole up to end. */
3860 got.br_startoff = end;
3861 if (got.br_startoff > bno) {
3862 /* Reading in a hole. */
3863 mval->br_startoff = bno;
3864 mval->br_startblock = HOLESTARTBLOCK;
3865 mval->br_blockcount =
3866 XFS_FILBLKS_MIN(len, got.br_startoff - bno);
3867 mval->br_state = XFS_EXT_NORM;
3868 bno += mval->br_blockcount;
3869 len -= mval->br_blockcount;
3875 /* set up the extent map to return. */
3876 xfs_bmapi_trim_map(mval, &got, &bno, len, obno, end, n, flags);
3877 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
3879 /* If we're done, stop now. */
3880 if (bno >= end || n >= *nmap)
3883 /* Else go on to the next record. */
3884 if (!xfs_iext_next_extent(ifp, &icur, &got))
3892 * Add a delayed allocation extent to an inode. Blocks are reserved from the
3893 * global pool and the extent inserted into the inode in-core extent tree.
3895 * On entry, got refers to the first extent beyond the offset of the extent to
3896 * allocate or eof is specified if no such extent exists. On return, got refers
3897 * to the extent record that was inserted to the inode fork.
3899 * Note that the allocated extent may have been merged with contiguous extents
3900 * during insertion into the inode fork. Thus, got does not reflect the current
3901 * state of the inode fork on return. If necessary, the caller can use lastx to
3902 * look up the updated record in the inode fork.
3905 xfs_bmapi_reserve_delalloc(
3906 struct xfs_inode *ip,
3910 xfs_filblks_t prealloc,
3911 struct xfs_bmbt_irec *got,
3912 struct xfs_iext_cursor *icur,
3915 struct xfs_mount *mp = ip->i_mount;
3916 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
3918 xfs_extlen_t indlen;
3920 xfs_fileoff_t aoff = off;
3923 * Cap the alloc length. Keep track of prealloc so we know whether to
3924 * tag the inode before we return.
3926 alen = XFS_FILBLKS_MIN(len + prealloc, MAXEXTLEN);
3928 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
3929 if (prealloc && alen >= len)
3930 prealloc = alen - len;
3932 /* Figure out the extent size, adjust alen */
3933 if (whichfork == XFS_COW_FORK) {
3934 struct xfs_bmbt_irec prev;
3935 xfs_extlen_t extsz = xfs_get_cowextsz_hint(ip);
3937 if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
3938 prev.br_startoff = NULLFILEOFF;
3940 error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
3941 1, 0, &aoff, &alen);
3946 * Make a transaction-less quota reservation for delayed allocation
3947 * blocks. This number gets adjusted later. We return if we haven't
3948 * allocated blocks already inside this loop.
3950 error = xfs_trans_reserve_quota_nblks(NULL, ip, (long)alen, 0,
3951 XFS_QMOPT_RES_REGBLKS);
3956 * Split changing sb for alen and indlen since they could be coming
3957 * from different places.
3959 indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
3962 error = xfs_mod_fdblocks(mp, -((int64_t)alen), false);
3964 goto out_unreserve_quota;
3966 error = xfs_mod_fdblocks(mp, -((int64_t)indlen), false);
3968 goto out_unreserve_blocks;
3971 ip->i_delayed_blks += alen;
3973 got->br_startoff = aoff;
3974 got->br_startblock = nullstartblock(indlen);
3975 got->br_blockcount = alen;
3976 got->br_state = XFS_EXT_NORM;
3978 xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);
3981 * Tag the inode if blocks were preallocated. Note that COW fork
3982 * preallocation can occur at the start or end of the extent, even when
3983 * prealloc == 0, so we must also check the aligned offset and length.
3985 if (whichfork == XFS_DATA_FORK && prealloc)
3986 xfs_inode_set_eofblocks_tag(ip);
3987 if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
3988 xfs_inode_set_cowblocks_tag(ip);
3992 out_unreserve_blocks:
3993 xfs_mod_fdblocks(mp, alen, false);
3994 out_unreserve_quota:
3995 if (XFS_IS_QUOTA_ON(mp))
3996 xfs_trans_unreserve_quota_nblks(NULL, ip, (long)alen, 0,
3997 XFS_QMOPT_RES_REGBLKS);
4003 struct xfs_bmalloca *bma)
4005 struct xfs_mount *mp = bma->ip->i_mount;
4006 int whichfork = xfs_bmapi_whichfork(bma->flags);
4007 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4008 int tmp_logflags = 0;
4011 ASSERT(bma->length > 0);
4014 * For the wasdelay case, we could also just allocate the stuff asked
4015 * for in this bmap call but that wouldn't be as good.
4018 bma->length = (xfs_extlen_t)bma->got.br_blockcount;
4019 bma->offset = bma->got.br_startoff;
4020 xfs_iext_peek_prev_extent(ifp, &bma->icur, &bma->prev);
4022 bma->length = XFS_FILBLKS_MIN(bma->length, MAXEXTLEN);
4024 bma->length = XFS_FILBLKS_MIN(bma->length,
4025 bma->got.br_startoff - bma->offset);
4029 * Set the data type being allocated. For the data fork, the first data
4030 * in the file is treated differently to all other allocations. For the
4031 * attribute fork, we only need to ensure the allocated range is not on
4034 if (!(bma->flags & XFS_BMAPI_METADATA)) {
4035 bma->datatype = XFS_ALLOC_NOBUSY;
4036 if (whichfork == XFS_DATA_FORK) {
4037 if (bma->offset == 0)
4038 bma->datatype |= XFS_ALLOC_INITIAL_USER_DATA;
4040 bma->datatype |= XFS_ALLOC_USERDATA;
4042 if (bma->flags & XFS_BMAPI_ZERO)
4043 bma->datatype |= XFS_ALLOC_USERDATA_ZERO;
4046 bma->minlen = (bma->flags & XFS_BMAPI_CONTIG) ? bma->length : 1;
4049 * Only want to do the alignment at the eof if it is userdata and
4050 * allocation length is larger than a stripe unit.
4052 if (mp->m_dalign && bma->length >= mp->m_dalign &&
4053 !(bma->flags & XFS_BMAPI_METADATA) && whichfork == XFS_DATA_FORK) {
4054 error = xfs_bmap_isaeof(bma, whichfork);
4059 error = xfs_bmap_alloc(bma);
4063 if (bma->blkno == NULLFSBLOCK)
4065 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur)
4066 bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
4068 * Bump the number of extents we've allocated
4074 bma->cur->bc_private.b.flags =
4075 bma->wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
4077 bma->got.br_startoff = bma->offset;
4078 bma->got.br_startblock = bma->blkno;
4079 bma->got.br_blockcount = bma->length;
4080 bma->got.br_state = XFS_EXT_NORM;
4083 * In the data fork, a wasdelay extent has been initialized, so
4084 * shouldn't be flagged as unwritten.
4086 * For the cow fork, however, we convert delalloc reservations
4087 * (extents allocated for speculative preallocation) to
4088 * allocated unwritten extents, and only convert the unwritten
4089 * extents to real extents when we're about to write the data.
4091 if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
4092 (bma->flags & XFS_BMAPI_PREALLOC))
4093 bma->got.br_state = XFS_EXT_UNWRITTEN;
4096 error = xfs_bmap_add_extent_delay_real(bma, whichfork);
4098 error = xfs_bmap_add_extent_hole_real(bma->tp, bma->ip,
4099 whichfork, &bma->icur, &bma->cur, &bma->got,
4100 &bma->logflags, bma->flags);
4102 bma->logflags |= tmp_logflags;
4107 * Update our extent pointer, given that xfs_bmap_add_extent_delay_real
4108 * or xfs_bmap_add_extent_hole_real might have merged it into one of
4109 * the neighbouring ones.
4111 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4113 ASSERT(bma->got.br_startoff <= bma->offset);
4114 ASSERT(bma->got.br_startoff + bma->got.br_blockcount >=
4115 bma->offset + bma->length);
4116 ASSERT(bma->got.br_state == XFS_EXT_NORM ||
4117 bma->got.br_state == XFS_EXT_UNWRITTEN);
4122 xfs_bmapi_convert_unwritten(
4123 struct xfs_bmalloca *bma,
4124 struct xfs_bmbt_irec *mval,
4128 int whichfork = xfs_bmapi_whichfork(flags);
4129 struct xfs_ifork *ifp = XFS_IFORK_PTR(bma->ip, whichfork);
4130 int tmp_logflags = 0;
4133 /* check if we need to do unwritten->real conversion */
4134 if (mval->br_state == XFS_EXT_UNWRITTEN &&
4135 (flags & XFS_BMAPI_PREALLOC))
4138 /* check if we need to do real->unwritten conversion */
4139 if (mval->br_state == XFS_EXT_NORM &&
4140 (flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT)) !=
4141 (XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
4145 * Modify (by adding) the state flag, if writing.
4147 ASSERT(mval->br_blockcount <= len);
4148 if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
4149 bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
4150 bma->ip, whichfork);
4152 mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
4153 ? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
4156 * Before insertion into the bmbt, zero the range being converted
4159 if (flags & XFS_BMAPI_ZERO) {
4160 error = xfs_zero_extent(bma->ip, mval->br_startblock,
4161 mval->br_blockcount);
4166 error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
4167 &bma->icur, &bma->cur, mval, &tmp_logflags);
4169 * Log the inode core unconditionally in the unwritten extent conversion
4170 * path because the conversion might not have done so (e.g., if the
4171 * extent count hasn't changed). We need to make sure the inode is dirty
4172 * in the transaction for the sake of fsync(), even if nothing has
4173 * changed, because fsync() will not force the log for this transaction
4174 * unless it sees the inode pinned.
4176 * Note: If we're only converting cow fork extents, there aren't
4177 * any on-disk updates to make, so we don't need to log anything.
4179 if (whichfork != XFS_COW_FORK)
4180 bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
4185 * Update our extent pointer, given that
4186 * xfs_bmap_add_extent_unwritten_real might have merged it into one
4187 * of the neighbouring ones.
4189 xfs_iext_get_extent(ifp, &bma->icur, &bma->got);
4192 * We may have combined previously unwritten space with written space,
4193 * so generate another request.
4195 if (mval->br_blockcount < len)
4200 static inline xfs_extlen_t
4202 struct xfs_trans *tp,
4203 struct xfs_inode *ip,
4206 if (tp && tp->t_firstblock != NULLFSBLOCK)
4208 if (XFS_IFORK_FORMAT(ip, fork) != XFS_DINODE_FMT_BTREE)
4210 return be16_to_cpu(XFS_IFORK_PTR(ip, fork)->if_broot->bb_level) + 1;
4214 * Log whatever the flags say, even if error. Otherwise we might miss detecting
4215 * a case where the data is changed, there's an error, and it's not logged so we
4216 * don't shutdown when we should. Don't bother logging extents/btree changes if
4217 * we converted to the other format.
4221 struct xfs_bmalloca *bma,
4225 if ((bma->logflags & xfs_ilog_fext(whichfork)) &&
4226 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
4227 bma->logflags &= ~xfs_ilog_fext(whichfork);
4228 else if ((bma->logflags & xfs_ilog_fbroot(whichfork)) &&
4229 XFS_IFORK_FORMAT(bma->ip, whichfork) != XFS_DINODE_FMT_BTREE)
4230 bma->logflags &= ~xfs_ilog_fbroot(whichfork);
4233 xfs_trans_log_inode(bma->tp, bma->ip, bma->logflags);
4235 xfs_btree_del_cursor(bma->cur, error);
4239 * Map file blocks to filesystem blocks, and allocate blocks or convert the
4240 * extent state if necessary. Details behaviour is controlled by the flags
4241 * parameter. Only allocates blocks from a single allocation group, to avoid
4246 struct xfs_trans *tp, /* transaction pointer */
4247 struct xfs_inode *ip, /* incore inode */
4248 xfs_fileoff_t bno, /* starting file offs. mapped */
4249 xfs_filblks_t len, /* length to map in file */
4250 int flags, /* XFS_BMAPI_... */
4251 xfs_extlen_t total, /* total blocks needed */
4252 struct xfs_bmbt_irec *mval, /* output: map values */
4253 int *nmap) /* i/o: mval size/count */
4255 struct xfs_bmalloca bma = {
4260 struct xfs_mount *mp = ip->i_mount;
4261 struct xfs_ifork *ifp;
4262 xfs_fileoff_t end; /* end of mapped file region */
4263 bool eof = false; /* after the end of extents */
4264 int error; /* error return */
4265 int n; /* current extent index */
4266 xfs_fileoff_t obno; /* old block number (offset) */
4267 int whichfork; /* data or attr fork */
4270 xfs_fileoff_t orig_bno; /* original block number value */
4271 int orig_flags; /* original flags arg value */
4272 xfs_filblks_t orig_len; /* original value of len arg */
4273 struct xfs_bmbt_irec *orig_mval; /* original value of mval */
4274 int orig_nmap; /* original value of *nmap */
4282 whichfork = xfs_bmapi_whichfork(flags);
4285 ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
4288 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
4289 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4290 ASSERT(!(flags & XFS_BMAPI_REMAP));
4292 /* zeroing is for currently only for data extents, not metadata */
4293 ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
4294 (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO));
4296 * we can allocate unwritten extents or pre-zero allocated blocks,
4297 * but it makes no sense to do both at once. This would result in
4298 * zeroing the unwritten extent twice, but it still being an
4299 * unwritten extent....
4301 ASSERT((flags & (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO)) !=
4302 (XFS_BMAPI_PREALLOC | XFS_BMAPI_ZERO));
4304 if (unlikely(XFS_TEST_ERROR(
4305 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4306 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4307 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4308 XFS_ERROR_REPORT("xfs_bmapi_write", XFS_ERRLEVEL_LOW, mp);
4309 return -EFSCORRUPTED;
4312 if (XFS_FORCED_SHUTDOWN(mp))
4315 ifp = XFS_IFORK_PTR(ip, whichfork);
4317 XFS_STATS_INC(mp, xs_blk_mapw);
4319 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4320 error = xfs_iread_extents(tp, ip, whichfork);
4325 if (!xfs_iext_lookup_extent(ip, ifp, bno, &bma.icur, &bma.got))
4327 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4328 bma.prev.br_startoff = NULLFILEOFF;
4329 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4334 while (bno < end && n < *nmap) {
4335 bool need_alloc = false, wasdelay = false;
4337 /* in hole or beyond EOF? */
4338 if (eof || bma.got.br_startoff > bno) {
4340 * CoW fork conversions should /never/ hit EOF or
4341 * holes. There should always be something for us
4344 ASSERT(!((flags & XFS_BMAPI_CONVERT) &&
4345 (flags & XFS_BMAPI_COWFORK)));
4348 } else if (isnullstartblock(bma.got.br_startblock)) {
4353 * First, deal with the hole before the allocated space
4354 * that we found, if any.
4356 if (need_alloc || wasdelay) {
4358 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
4359 bma.wasdel = wasdelay;
4364 * There's a 32/64 bit type mismatch between the
4365 * allocation length request (which can be 64 bits in
4366 * length) and the bma length request, which is
4367 * xfs_extlen_t and therefore 32 bits. Hence we have to
4368 * check for 32-bit overflows and handle them here.
4370 if (len > (xfs_filblks_t)MAXEXTLEN)
4371 bma.length = MAXEXTLEN;
4376 ASSERT(bma.length > 0);
4377 error = xfs_bmapi_allocate(&bma);
4380 if (bma.blkno == NULLFSBLOCK)
4384 * If this is a CoW allocation, record the data in
4385 * the refcount btree for orphan recovery.
4387 if (whichfork == XFS_COW_FORK) {
4388 error = xfs_refcount_alloc_cow_extent(tp,
4389 bma.blkno, bma.length);
4395 /* Deal with the allocated space we found. */
4396 xfs_bmapi_trim_map(mval, &bma.got, &bno, len, obno,
4399 /* Execute unwritten extent conversion if necessary */
4400 error = xfs_bmapi_convert_unwritten(&bma, mval, len, flags);
4401 if (error == -EAGAIN)
4406 /* update the extent map to return */
4407 xfs_bmapi_update_map(&mval, &bno, &len, obno, end, &n, flags);
4410 * If we're done, stop now. Stop when we've allocated
4411 * XFS_BMAP_MAX_NMAP extents no matter what. Otherwise
4412 * the transaction may get too big.
4414 if (bno >= end || n >= *nmap || bma.nallocs >= *nmap)
4417 /* Else go on to the next record. */
4419 if (!xfs_iext_next_extent(ifp, &bma.icur, &bma.got))
eof = true;
}
*nmap = n;
4424 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
whichfork);
if (error)
goto error0;
4429 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
4430 XFS_IFORK_NEXTENTS(ip, whichfork) >
4431 XFS_IFORK_MAXEXT(ip, whichfork));
4432 xfs_bmapi_finish(&bma, whichfork, 0);
4433 xfs_bmap_validate_ret(orig_bno, orig_len, orig_flags, orig_mval,
orig_nmap, *nmap);
return 0;
error0:
4437 xfs_bmapi_finish(&bma, whichfork, error);
return error;
}
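/*
 * Illustrative sketch of a typical xfs_bmapi_write() call; the
 * identifiers (offset_fsb, count_fsb, resblks) are placeholders and
 * not taken from this file:
 *
 *	struct xfs_bmbt_irec	imap;
 *	int			nimaps = 1;
 *
 *	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
 *			XFS_BMAPI_PREALLOC, resblks, &imap, &nimaps);
 *
 * On success, nimaps holds how many mappings were filled in (at most
 * XFS_BMAP_MAX_NMAP) and imap describes the first of them.
 */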
/*
4442 * Convert an existing delalloc extent to real blocks based on file offset. This
4443 * attempts to allocate the entire delalloc extent and may require multiple
4444 * invocations to allocate the target offset if a large enough physical extent
* is not available.
*/
4448 xfs_bmapi_convert_delalloc(
4449 struct xfs_inode *ip,
4451 xfs_fileoff_t offset_fsb,
4452 struct xfs_bmbt_irec *imap,
4455 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4456 struct xfs_mount *mp = ip->i_mount;
4457 struct xfs_bmalloca bma = { NULL };
4458 struct xfs_trans *tp;
4462 * Space for the extent and indirect blocks was reserved when the
4463 * delalloc extent was created so there's no need to do so here.
4465 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0,
4466 XFS_TRANS_RESERVE, &tp);
4470 xfs_ilock(ip, XFS_ILOCK_EXCL);
4471 xfs_trans_ijoin(tp, ip, 0);
4473 if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &bma.icur, &bma.got) ||
4474 bma.got.br_startoff > offset_fsb) {
4476 * No extent found in the range we are trying to convert. This
4477 * should only happen for the COW fork, where another thread
4478 * might have moved the extent to the data fork in the meantime.
4480 WARN_ON_ONCE(whichfork != XFS_COW_FORK);
error = -EAGAIN;
4482 goto out_trans_cancel;
}
4486 * If we find a real extent here we raced with another thread converting
4487 * the extent. Just return the real extent at this offset.
4489 if (!isnullstartblock(bma.got.br_startblock)) {
*imap = bma.got;
4491 *seq = READ_ONCE(ifp->if_seq);
4492 goto out_trans_cancel;
}
4498 bma.offset = bma.got.br_startoff;
4499 bma.length = max_t(xfs_filblks_t, bma.got.br_blockcount, MAXEXTLEN);
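/*
 * Note (annotation): for a delalloc conversion xfs_bmapi_allocate()
 * trims the request back to the delalloc extent itself, so asking for
 * at least MAXEXTLEN here simply means "convert as much of this
 * extent as possible in one go".
 */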
4500 bma.total = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK);
4501 bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
4502 if (whichfork == XFS_COW_FORK)
4503 bma.flags = XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
4505 if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
4506 bma.prev.br_startoff = NULLFILEOFF;
4508 error = xfs_bmapi_allocate(&bma);
if (error)
goto out_finish;
error = -ENOSPC;
4513 if (WARN_ON_ONCE(bma.blkno == NULLFSBLOCK))
goto out_finish;
4515 error = -EFSCORRUPTED;
4516 if (WARN_ON_ONCE(!bma.got.br_startblock && !XFS_IS_REALTIME_INODE(ip)))
goto out_finish;
4519 XFS_STATS_ADD(mp, xs_xstrat_bytes, XFS_FSB_TO_B(mp, bma.length));
4520 XFS_STATS_INC(mp, xs_xstrat_quick);
4522 ASSERT(!isnullstartblock(bma.got.br_startblock));
*imap = bma.got;
4524 *seq = READ_ONCE(ifp->if_seq);
4526 if (whichfork == XFS_COW_FORK) {
4527 error = xfs_refcount_alloc_cow_extent(tp, bma.blkno,
bma.length);
if (error)
goto out_finish;
}
4533 error = xfs_bmap_btree_to_extents(tp, ip, bma.cur, &bma.logflags,
whichfork);
if (error)
goto out_finish;
4538 xfs_bmapi_finish(&bma, whichfork, 0);
4539 error = xfs_trans_commit(tp);
4540 xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
out_finish:
4544 xfs_bmapi_finish(&bma, whichfork, error);
out_trans_cancel:
4546 xfs_trans_cancel(tp);
4547 xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
static int
xfs_bmapi_remap(
4553 struct xfs_trans *tp,
4554 struct xfs_inode *ip,
xfs_fileoff_t bno,
xfs_filblks_t len,
4557 xfs_fsblock_t startblock,
int flags)
{
4560 struct xfs_mount *mp = ip->i_mount;
4561 struct xfs_ifork *ifp;
4562 struct xfs_btree_cur *cur = NULL;
4563 struct xfs_bmbt_irec got;
4564 struct xfs_iext_cursor icur;
4565 int whichfork = xfs_bmapi_whichfork(flags);
4566 int logflags = 0, error;
4568 ifp = XFS_IFORK_PTR(ip, whichfork);
4570 ASSERT(len <= (xfs_filblks_t)MAXEXTLEN);
4571 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
4572 ASSERT(!(flags & ~(XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC |
4573 XFS_BMAPI_NORMAP)));
4574 ASSERT((flags & (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC)) !=
4575 (XFS_BMAPI_ATTRFORK | XFS_BMAPI_PREALLOC));
4577 if (unlikely(XFS_TEST_ERROR(
4578 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
4579 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
4580 mp, XFS_ERRTAG_BMAPIFORMAT))) {
4581 XFS_ERROR_REPORT("xfs_bmapi_remap", XFS_ERRLEVEL_LOW, mp);
4582 return -EFSCORRUPTED;
4585 if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
4588 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
4589 error = xfs_iread_extents(tp, ip, whichfork);
if (error)
return error;
}
4594 if (xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
4595 /* make sure we only reflink into a hole. */
4596 ASSERT(got.br_startoff > bno);
4597 ASSERT(got.br_startoff - bno >= len);
}
4600 ip->i_d.di_nblocks += len;
4601 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
4603 if (ifp->if_flags & XFS_IFBROOT) {
4604 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
4605 cur->bc_private.b.flags = 0;
}
4608 got.br_startoff = bno;
4609 got.br_startblock = startblock;
4610 got.br_blockcount = len;
4611 if (flags & XFS_BMAPI_PREALLOC)
4612 got.br_state = XFS_EXT_UNWRITTEN;
else
4614 got.br_state = XFS_EXT_NORM;
4616 error = xfs_bmap_add_extent_hole_real(tp, ip, whichfork, &icur,
4617 &cur, &got, &logflags, flags);
if (error)
goto error0;
4621 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags, whichfork);
error0:
4624 if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS)
4625 logflags &= ~XFS_ILOG_DEXT;
4626 else if (ip->i_d.di_format != XFS_DINODE_FMT_BTREE)
4627 logflags &= ~XFS_ILOG_DBROOT;
if (logflags)
4630 xfs_trans_log_inode(tp, ip, logflags);
if (cur)
4632 xfs_btree_del_cursor(cur, error);
return error;
}
/*
4637 * When a delalloc extent is split (e.g., due to a hole punch), the original
4638 * indlen reservation must be shared across the two new extents that are left
* behind.
4641 * Given the original reservation and the worst case indlen for the two new
4642 * extents (as calculated by xfs_bmap_worst_indlen()), split the original
4643 * reservation fairly across the two new extents. If necessary, steal available
4644 * blocks from a deleted extent to make up a reservation deficiency (e.g., if
4645 * ores == 1). The number of stolen blocks is returned. The availability and
4646 * subsequent accounting of stolen blocks is the responsibility of the caller.
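 *
 * Worked example (annotation): with ores == 3, *indlen1 == *indlen2 == 2
 * and avail == 0, nothing can be stolen; resfactor is 300 / 4 == 75, so
 * each indlen scales down to 2 * 75 / 100 == 1, and the one leftover
 * block is handed back to indlen1, splitting the reservation 2/1.
 */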
4648 static xfs_filblks_t
4649 xfs_bmap_split_indlen(
4650 xfs_filblks_t ores, /* original res. */
4651 xfs_filblks_t *indlen1, /* ext1 worst indlen */
4652 xfs_filblks_t *indlen2, /* ext2 worst indlen */
4653 xfs_filblks_t avail) /* stealable blocks */
4655 xfs_filblks_t len1 = *indlen1;
4656 xfs_filblks_t len2 = *indlen2;
4657 xfs_filblks_t nres = len1 + len2; /* new total res. */
4658 xfs_filblks_t stolen = 0;
4659 xfs_filblks_t resfactor;
4662 * Steal as many blocks as we can to try and satisfy the worst case
4663 * indlen for both new extents.
4665 if (ores < nres && avail)
4666 stolen = XFS_FILBLKS_MIN(nres - ores, avail);
ores += stolen;

4669 /* nothing else to do if we've satisfied the new reservation */
if (ores >= nres)
return stolen;
/*
4674 * We can't meet the total required reservation for the two extents.
4675 * Calculate the percent of the overall shortage between both extents
4676 * and apply this percentage to each of the requested indlen values.
4677 * This distributes the shortage fairly and reduces the chances that one
4678 * of the two extents is left with nothing when extents are repeatedly
* split.
*/
4681 resfactor = (ores * 100);
4682 do_div(resfactor, nres);
len1 *= resfactor;
do_div(len1, 100);
len2 *= resfactor;
do_div(len2, 100);
4687 ASSERT(len1 + len2 <= ores);
4688 ASSERT(len1 < *indlen1 && len2 < *indlen2);
/*
4691 * Hand out the remainder to each extent. If one of the two reservations
4692 * is zero, we want to make sure that one gets a block first. The loop
4693 * below starts with len1, so hand len2 a block right off the bat if it
* is zero.
*/
4696 ores -= (len1 + len2);
4697 ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
4698 if (ores && !len2 && *indlen2) {
len2++;
ores--;
}
while (ores) {
4703 if (len1 < *indlen1) {
len1++;
ores--;
}
if (!ores)
break;
4709 if (len2 < *indlen2) {
len2++;
ores--;
}
}

*indlen1 = len1;
*indlen2 = len2;

return stolen;
}
int
4722 xfs_bmap_del_extent_delay(
4723 struct xfs_inode *ip,
int whichfork,
4725 struct xfs_iext_cursor *icur,
4726 struct xfs_bmbt_irec *got,
4727 struct xfs_bmbt_irec *del)
4729 struct xfs_mount *mp = ip->i_mount;
4730 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
4731 struct xfs_bmbt_irec new;
4732 int64_t da_old, da_new, da_diff = 0;
4733 xfs_fileoff_t del_endoff, got_endoff;
4734 xfs_filblks_t got_indlen, new_indlen, stolen;
4735 int state = xfs_bmap_fork_to_state(whichfork);
4739 XFS_STATS_INC(mp, xs_del_exlist);
4741 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
4742 del_endoff = del->br_startoff + del->br_blockcount;
4743 got_endoff = got->br_startoff + got->br_blockcount;
4744 da_old = startblockval(got->br_startblock);
4747 ASSERT(del->br_blockcount > 0);
4748 ASSERT(got->br_startoff <= del->br_startoff);
4749 ASSERT(got_endoff >= del_endoff);
if (isrt) {
4752 uint64_t rtexts = XFS_FSB_TO_B(mp, del->br_blockcount);
4754 do_div(rtexts, mp->m_sb.sb_rextsize);
4755 xfs_mod_frextents(mp, rtexts);
}
4759 * Update the inode delalloc counter now and wait to update the
4760 * sb counters as we might have to borrow some blocks for the
4761 * indirect block accounting.
4763 error = xfs_trans_reserve_quota_nblks(NULL, ip,
4764 -((long)del->br_blockcount), 0,
4765 isrt ? XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
if (error)
return error;

4768 ip->i_delayed_blks -= del->br_blockcount;
4770 if (got->br_startoff == del->br_startoff)
4771 state |= BMAP_LEFT_FILLING;
4772 if (got_endoff == del_endoff)
4773 state |= BMAP_RIGHT_FILLING;
4775 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4776 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4778 * Matches the whole extent. Delete the entry.
4780 xfs_iext_remove(ip, icur, state);
4781 xfs_iext_prev(ifp, icur);
break;
4783 case BMAP_LEFT_FILLING:
4785 * Deleting the first part of the extent.
4787 got->br_startoff = del_endoff;
4788 got->br_blockcount -= del->br_blockcount;
4789 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4790 got->br_blockcount), da_old);
4791 got->br_startblock = nullstartblock((int)da_new);
4792 xfs_iext_update_extent(ip, state, icur, got);
break;
4794 case BMAP_RIGHT_FILLING:
4796 * Deleting the last part of the extent.
4798 got->br_blockcount = got->br_blockcount - del->br_blockcount;
4799 da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip,
4800 got->br_blockcount), da_old);
4801 got->br_startblock = nullstartblock((int)da_new);
4802 xfs_iext_update_extent(ip, state, icur, got);
break;
default:
/*
4806 * Deleting the middle of the extent.
4808 * Distribute the original indlen reservation across the two new
4809 * extents. Steal blocks from the deleted extent if necessary.
4810 * Stealing blocks simply fudges the fdblocks accounting below.
4811 * Warn if either of the new indlen reservations is zero as this
4812 * can lead to delalloc problems.
*/
4814 got->br_blockcount = del->br_startoff - got->br_startoff;
4815 got_indlen = xfs_bmap_worst_indlen(ip, got->br_blockcount);
4817 new.br_blockcount = got_endoff - del_endoff;
4818 new_indlen = xfs_bmap_worst_indlen(ip, new.br_blockcount);
4820 WARN_ON_ONCE(!got_indlen || !new_indlen);
4821 stolen = xfs_bmap_split_indlen(da_old, &got_indlen, &new_indlen,
4822 del->br_blockcount);
4824 got->br_startblock = nullstartblock((int)got_indlen);
4826 new.br_startoff = del_endoff;
4827 new.br_state = got->br_state;
4828 new.br_startblock = nullstartblock((int)new_indlen);
4830 xfs_iext_update_extent(ip, state, icur, got);
4831 xfs_iext_next(ifp, icur);
4832 xfs_iext_insert(ip, icur, &new, state);
4834 da_new = got_indlen + new_indlen - stolen;
4835 del->br_blockcount -= stolen;
break;
}
4839 ASSERT(da_old >= da_new);
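/*
 * Note (annotation): the single fdblocks update below returns both the
 * unused part of the indirect-block reservation (da_old - da_new) and,
 * for non-realtime files, the freed data blocks themselves; realtime
 * blocks were already returned to the rt extent counter above.
 */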
4840 da_diff = da_old - da_new;
if (!isrt)
4842 da_diff += del->br_blockcount;
if (da_diff)
4844 xfs_mod_fdblocks(mp, da_diff, false);
return error;
}
void
4849 xfs_bmap_del_extent_cow(
4850 struct xfs_inode *ip,
4851 struct xfs_iext_cursor *icur,
4852 struct xfs_bmbt_irec *got,
4853 struct xfs_bmbt_irec *del)
4855 struct xfs_mount *mp = ip->i_mount;
4856 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
4857 struct xfs_bmbt_irec new;
4858 xfs_fileoff_t del_endoff, got_endoff;
4859 int state = BMAP_COWFORK;
4861 XFS_STATS_INC(mp, xs_del_exlist);
4863 del_endoff = del->br_startoff + del->br_blockcount;
4864 got_endoff = got->br_startoff + got->br_blockcount;
4866 ASSERT(del->br_blockcount > 0);
4867 ASSERT(got->br_startoff <= del->br_startoff);
4868 ASSERT(got_endoff >= del_endoff);
4869 ASSERT(!isnullstartblock(got->br_startblock));
4871 if (got->br_startoff == del->br_startoff)
4872 state |= BMAP_LEFT_FILLING;
4873 if (got_endoff == del_endoff)
4874 state |= BMAP_RIGHT_FILLING;
4876 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
4877 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
4879 * Matches the whole extent. Delete the entry.
4881 xfs_iext_remove(ip, icur, state);
4882 xfs_iext_prev(ifp, icur);
break;
4884 case BMAP_LEFT_FILLING:
4886 * Deleting the first part of the extent.
4888 got->br_startoff = del_endoff;
4889 got->br_blockcount -= del->br_blockcount;
4890 got->br_startblock = del->br_startblock + del->br_blockcount;
4891 xfs_iext_update_extent(ip, state, icur, got);
break;
4893 case BMAP_RIGHT_FILLING:
4895 * Deleting the last part of the extent.
4897 got->br_blockcount -= del->br_blockcount;
4898 xfs_iext_update_extent(ip, state, icur, got);
break;
default:
/*
4902 * Deleting the middle of the extent.
*/
4904 got->br_blockcount = del->br_startoff - got->br_startoff;
4906 new.br_startoff = del_endoff;
4907 new.br_blockcount = got_endoff - del_endoff;
4908 new.br_state = got->br_state;
4909 new.br_startblock = del->br_startblock + del->br_blockcount;
4911 xfs_iext_update_extent(ip, state, icur, got);
4912 xfs_iext_next(ifp, icur);
4913 xfs_iext_insert(ip, icur, &new, state);
break;
}
4916 ip->i_delayed_blks -= del->br_blockcount;
4920 * Called by xfs_bmapi to update file extent records and the btree
4921 * after removing space.
4923 STATIC int /* error */
4924 xfs_bmap_del_extent_real(
4925 xfs_inode_t *ip, /* incore inode pointer */
4926 xfs_trans_t *tp, /* current transaction pointer */
4927 struct xfs_iext_cursor *icur,
4928 xfs_btree_cur_t *cur, /* if null, not a btree */
4929 xfs_bmbt_irec_t *del, /* data to remove from extents */
4930 int *logflagsp, /* inode logging flags */
4931 int whichfork, /* data or attr fork */
4932 int bflags) /* bmapi flags */
4934 xfs_fsblock_t del_endblock=0; /* first block past del */
4935 xfs_fileoff_t del_endoff; /* first offset past del */
4936 int do_fx; /* free extent at end of routine */
4937 int error; /* error return value */
4938 int flags = 0;/* inode logging flags */
4939 struct xfs_bmbt_irec got; /* current extent entry */
4940 xfs_fileoff_t got_endoff; /* first offset past got */
4941 int i; /* temp state */
4942 struct xfs_ifork *ifp; /* inode fork pointer */
4943 xfs_mount_t *mp; /* mount structure */
4944 xfs_filblks_t nblks; /* quota/sb block count */
4945 xfs_bmbt_irec_t new; /* new record to be inserted */
4947 uint qfield; /* quota field to update */
4948 int state = xfs_bmap_fork_to_state(whichfork);
4949 struct xfs_bmbt_irec old;
mp = ip->i_mount;
4952 XFS_STATS_INC(mp, xs_del_exlist);
4954 ifp = XFS_IFORK_PTR(ip, whichfork);
4955 ASSERT(del->br_blockcount > 0);
4956 xfs_iext_get_extent(ifp, icur, &got);
4957 ASSERT(got.br_startoff <= del->br_startoff);
4958 del_endoff = del->br_startoff + del->br_blockcount;
4959 got_endoff = got.br_startoff + got.br_blockcount;
4960 ASSERT(got_endoff >= del_endoff);
4961 ASSERT(!isnullstartblock(got.br_startblock));
4966 * If it's the case where the directory code is running with no block
4967 * reservation, and the deleted block is in the middle of its extent,
4968 * and the resulting insert of an extent would cause transformation to
4969 * btree format, then reject it. The calling code will then swap blocks
4970 * around instead. We have to do this now, rather than waiting for the
4971 * conversion to btree format, since the transaction will be dirty then.
4973 if (tp->t_blk_res == 0 &&
4974 XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
4975 XFS_IFORK_NEXTENTS(ip, whichfork) >=
4976 XFS_IFORK_MAXEXT(ip, whichfork) &&
4977 del->br_startoff > got.br_startoff && del_endoff < got_endoff)
return -ENOSPC;
4980 flags = XFS_ILOG_CORE;
4981 if (whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip)) {
xfs_fsblock_t bno;
xfs_filblks_t len;
xfs_extlen_t mod;

4986 bno = div_u64_rem(del->br_startblock, mp->m_sb.sb_rextsize,
&mod);
ASSERT(mod == 0);
4989 len = div_u64_rem(del->br_blockcount, mp->m_sb.sb_rextsize,
&mod);
ASSERT(mod == 0);

4993 error = xfs_rtfree_extent(tp, bno, (xfs_extlen_t)len);
if (error)
goto done;
do_fx = 0;
4997 nblks = len * mp->m_sb.sb_rextsize;
4998 qfield = XFS_TRANS_DQ_RTBCOUNT;
} else {
do_fx = 1;
5001 nblks = del->br_blockcount;
5002 qfield = XFS_TRANS_DQ_BCOUNT;
}
5005 del_endblock = del->br_startblock + del->br_blockcount;
if (cur) {
5007 error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto done;
5010 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
}
5013 if (got.br_startoff == del->br_startoff)
5014 state |= BMAP_LEFT_FILLING;
5015 if (got_endoff == del_endoff)
5016 state |= BMAP_RIGHT_FILLING;
5018 switch (state & (BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING)) {
5019 case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
5021 * Matches the whole extent. Delete the entry.
5023 xfs_iext_remove(ip, icur, state);
5024 xfs_iext_prev(ifp, icur);
5025 XFS_IFORK_NEXT_SET(ip, whichfork,
5026 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5027 flags |= XFS_ILOG_CORE;
if (!cur) {
5029 flags |= xfs_ilog_fext(whichfork);
break;
}
5032 if ((error = xfs_btree_delete(cur, &i)))
goto done;
5034 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
break;
5036 case BMAP_LEFT_FILLING:
5038 * Deleting the first part of the extent.
5040 got.br_startoff = del_endoff;
5041 got.br_startblock = del_endblock;
5042 got.br_blockcount -= del->br_blockcount;
5043 xfs_iext_update_extent(ip, state, icur, &got);
if (!cur) {
5045 flags |= xfs_ilog_fext(whichfork);
break;
}
5048 error = xfs_bmbt_update(cur, &got);
if (error)
goto done;
break;
5052 case BMAP_RIGHT_FILLING:
5054 * Deleting the last part of the extent.
5056 got.br_blockcount -= del->br_blockcount;
5057 xfs_iext_update_extent(ip, state, icur, &got);
if (!cur) {
5059 flags |= xfs_ilog_fext(whichfork);
break;
}
5062 error = xfs_bmbt_update(cur, &got);
if (error)
goto done;
break;
default:
/*
5068 * Deleting the middle of the extent.
*/
old = got;

5072 got.br_blockcount = del->br_startoff - got.br_startoff;
5073 xfs_iext_update_extent(ip, state, icur, &got);
5075 new.br_startoff = del_endoff;
5076 new.br_blockcount = got_endoff - del_endoff;
5077 new.br_state = got.br_state;
5078 new.br_startblock = del_endblock;
5080 flags |= XFS_ILOG_CORE;
if (cur) {
5082 error = xfs_bmbt_update(cur, &got);
if (error)
goto done;
5085 error = xfs_btree_increment(cur, 0, &i);
if (error)
goto done;
5088 cur->bc_rec.b = new;
5089 error = xfs_btree_insert(cur, &i);
5090 if (error && error != -ENOSPC)
goto done;
5093 * If get no-space back from btree insert, it tried a
5094 * split, and we have a zero block reservation. Fix up
5095 * our state and return the error.
5097 if (error == -ENOSPC) {
/*
5099 * Reset the cursor, don't trust it after any
* insert operation.
*/
5102 error = xfs_bmbt_lookup_eq(cur, &got, &i);
5105 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
/*
5107 * Update the btree record back
5108 * to the original value.
*/
5110 error = xfs_bmbt_update(cur, &old);
if (error)
goto done;
/*
5114 * Reset the extent record back
5115 * to the original value.
*/
5117 xfs_iext_update_extent(ip, state, icur, &old);
flags = 0;
error = -ENOSPC;
goto done;
}
5122 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
} else
5124 flags |= xfs_ilog_fext(whichfork);
5125 XFS_IFORK_NEXT_SET(ip, whichfork,
5126 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
5127 xfs_iext_next(ifp, icur);
5128 xfs_iext_insert(ip, icur, &new, state);
break;
}
5132 /* remove reverse mapping */
5133 error = xfs_rmap_unmap_extent(tp, ip, whichfork, del);
if (error)
goto done;
5138 * If we need to, add to list of extents to delete.
5140 if (do_fx && !(bflags & XFS_BMAPI_REMAP)) {
5141 if (xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK) {
5142 error = xfs_refcount_decrease_extent(tp, del);
if (error)
goto done;
} else {
5146 __xfs_bmap_add_free(tp, del->br_startblock,
5147 del->br_blockcount, NULL,
5148 (bflags & XFS_BMAPI_NODISCARD) ||
5149 del->br_state == XFS_EXT_UNWRITTEN);
}
}
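/*
 * Note (annotation): the last argument to __xfs_bmap_add_free() above
 * suppresses online discard when the caller asked for none, or when
 * the extent was unwritten and therefore never held data worth
 * trimming.
 */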
5154 * Adjust inode # blocks in the file.
5157 ip->i_d.di_nblocks -= nblks;
5159 * Adjust quota data.
5161 if (qfield && !(bflags & XFS_BMAPI_REMAP))
5162 xfs_trans_mod_dquot_byino(tp, ip, qfield, (long)-nblks);

done:
*logflagsp = flags;
return error;
}
/*
5170 * Unmap (remove) blocks from a file.
5171 * If nexts is nonzero then the number of extents to remove is limited to
5172 * that value. If not all extents in the block range can be removed then
* *done is set.
*/
int /* error */
__xfs_bunmapi(
5177 struct xfs_trans *tp, /* transaction pointer */
5178 struct xfs_inode *ip, /* incore inode */
5179 xfs_fileoff_t start, /* first file offset deleted */
5180 xfs_filblks_t *rlen, /* i/o: amount remaining */
5181 int flags, /* misc flags */
5182 xfs_extnum_t nexts) /* number of extents max */
5184 struct xfs_btree_cur *cur; /* bmap btree cursor */
5185 struct xfs_bmbt_irec del; /* extent being deleted */
5186 int error; /* error return value */
5187 xfs_extnum_t extno; /* extent number in list */
5188 struct xfs_bmbt_irec got; /* current extent record */
5189 struct xfs_ifork *ifp; /* inode fork pointer */
5190 int isrt; /* freeing in rt area */
5191 int logflags; /* transaction logging flags */
5192 xfs_extlen_t mod; /* rt extent offset */
5193 struct xfs_mount *mp; /* mount structure */
5194 int tmp_logflags; /* partial logging flags */
5195 int wasdel; /* was a delayed alloc extent */
5196 int whichfork; /* data or attribute fork */
5198 xfs_filblks_t len = *rlen; /* length to unmap in file */
5199 xfs_fileoff_t max_len;
5200 xfs_agnumber_t prev_agno = NULLAGNUMBER, agno;
5202 struct xfs_iext_cursor icur;
5205 trace_xfs_bunmap(ip, start, len, flags, _RET_IP_);
5207 whichfork = xfs_bmapi_whichfork(flags);
5208 ASSERT(whichfork != XFS_COW_FORK);
5209 ifp = XFS_IFORK_PTR(ip, whichfork);
5211 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5212 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
5213 XFS_ERROR_REPORT("xfs_bunmapi", XFS_ERRLEVEL_LOW,
5215 return -EFSCORRUPTED;
5218 if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
5221 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
/*
5226 * Guesstimate how many blocks we can unmap without running the risk of
5227 * blowing out the transaction with a mix of EFIs and reflink
* updates.
*/
5230 if (tp && xfs_is_reflink_inode(ip) && whichfork == XFS_DATA_FORK)
5231 max_len = min(len, xfs_refcount_max_unmap(tp->t_log_res));
else
max_len = len;
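/*
 * Note (annotation): xfs_refcount_max_unmap() turns the transaction's
 * log reservation into a conservative cap on how much unmap work can
 * be deferred safely in a single transaction.
 */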
5235 if (!(ifp->if_flags & XFS_IFEXTENTS) &&
5236 (error = xfs_iread_extents(tp, ip, whichfork)))
return error;
5238 if (xfs_iext_count(ifp) == 0) {
*rlen = 0;
return 0;
}
5242 XFS_STATS_INC(mp, xs_blk_unmap);
5243 isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
5246 if (!xfs_iext_lookup_extent_before(ip, ifp, &end, &icur, &got)) {
*rlen = 0;
return 0;
}
end--;
5253 if (ifp->if_flags & XFS_IFBROOT) {
5254 ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
5255 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5256 cur->bc_private.b.flags = 0;
} else
cur = NULL;

if (isrt) {
/*
5262 * Synchronize by locking the bitmap inode.
*/
5264 xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
5265 xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
5266 xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
5267 xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
}
5271 while (end != (xfs_fileoff_t)-1 && end >= start &&
5272 (nexts == 0 || extno < nexts) && max_len > 0) {
5274 * Is the found extent after a hole in which end lives?
5275 * Just back up to the previous extent, if so.
5277 if (got.br_startoff > end &&
5278 !xfs_iext_prev_extent(ifp, &icur, &got)) {
done = true;
break;
}
5283 * Is the last block of this extent before the range
5284 * we're supposed to delete? If so, we're done.
5286 end = XFS_FILEOFF_MIN(end,
5287 got.br_startoff + got.br_blockcount - 1);
if (end < start)
break;
/*
5291 * Then deal with the (possibly delayed) allocated space
* we found.
*/
del = got;
5295 wasdel = isnullstartblock(del.br_startblock);
/*
5298 * Make sure we don't touch multiple AGF headers out of order
5299 * in a single transaction, as that could cause AB-BA deadlocks.
*/
if (!wasdel) {
5302 agno = XFS_FSB_TO_AGNO(mp, del.br_startblock);
5303 if (prev_agno != NULLAGNUMBER && prev_agno > agno)
break;
prev_agno = agno;
}
5307 if (got.br_startoff < start) {
5308 del.br_startoff = start;
5309 del.br_blockcount -= start - got.br_startoff;
if (!wasdel)
5311 del.br_startblock += start - got.br_startoff;
}
5313 if (del.br_startoff + del.br_blockcount > end + 1)
5314 del.br_blockcount = end + 1 - del.br_startoff;
5316 /* How much can we safely unmap? */
5317 if (max_len < del.br_blockcount) {
5318 del.br_startoff += del.br_blockcount - max_len;
if (!wasdel)
5320 del.br_startblock += del.br_blockcount - max_len;
5321 del.br_blockcount = max_len;
}

if (!isrt)
goto delete;

5327 sum = del.br_startblock + del.br_blockcount;
5328 div_u64_rem(sum, mp->m_sb.sb_rextsize, &mod);
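/*
 * Note (annotation): mod is now the number of blocks by which the end
 * of the deletion overshoots a realtime extent boundary; zero means
 * the end is rt-extent aligned.
 */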
if (mod) {
/*
5331 * Realtime extent not lined up at the end.
5332 * The extent could have been split into written
5333 * and unwritten pieces, or we could just be
5334 * unmapping part of it. But we can't really
5335 * get rid of part of a realtime extent.
*/
5337 if (del.br_state == XFS_EXT_UNWRITTEN) {
/*
5339 * This piece is unwritten, or we're not
5340 * using unwritten extents. Skip over it.
*/
ASSERT(end >= mod);
5343 end -= mod > del.br_blockcount ?
5344 del.br_blockcount : mod;
5345 if (end < got.br_startoff &&
5346 !xfs_iext_prev_extent(ifp, &icur, &got)) {
done = true;
break;
}
continue;
}
/*
5353 * It's written, turn it unwritten.
5354 * This is better than zeroing it.
*/
5356 ASSERT(del.br_state == XFS_EXT_NORM);
5357 ASSERT(tp->t_blk_res > 0);
/*
5359 * If this spans a realtime extent boundary,
5360 * chop it back to the start of the one we end at.
*/
5362 if (del.br_blockcount > mod) {
5363 del.br_startoff += del.br_blockcount - mod;
5364 del.br_startblock += del.br_blockcount - mod;
5365 del.br_blockcount = mod;
}
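/*
 * Worked example (annotation): with sb_rextsize == 4 and a delete
 * whose end falls 2 blocks into an rt extent (mod == 2), the delete
 * is trimmed above to just those trailing 2 blocks, and they are
 * converted to unwritten below instead of being freed.
 */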
5367 del.br_state = XFS_EXT_UNWRITTEN;
5368 error = xfs_bmap_add_extent_unwritten_real(tp, ip,
5369 whichfork, &icur, &cur, &del,
&logflags);
if (error)
goto error0;
goto nodelete;
}
5375 div_u64_rem(del.br_startblock, mp->m_sb.sb_rextsize, &mod);
if (mod) {
/*
5378 * Realtime extent is lined up at the end but not
5379 * at the front. We'll get rid of full extents if
* we can.
*/
5382 mod = mp->m_sb.sb_rextsize - mod;
5383 if (del.br_blockcount > mod) {
5384 del.br_blockcount -= mod;
5385 del.br_startoff += mod;
5386 del.br_startblock += mod;
5387 } else if (del.br_startoff == start &&
5388 (del.br_state == XFS_EXT_UNWRITTEN ||
5389 tp->t_blk_res == 0)) {
/*
5391 * Can't make it unwritten. There isn't
5392 * a full extent here so just skip it.
*/
5394 ASSERT(end >= del.br_blockcount);
5395 end -= del.br_blockcount;
5396 if (got.br_startoff > end &&
5397 !xfs_iext_prev_extent(ifp, &icur, &got)) {
done = true;
break;
}
continue;
5402 } else if (del.br_state == XFS_EXT_UNWRITTEN) {
5403 struct xfs_bmbt_irec prev;

/*
5406 * This one is already unwritten.
5407 * It must have a written left neighbor.
5408 * Unwrite the killed part of that one and
* try again.
*/
5411 if (!xfs_iext_prev_extent(ifp, &icur, &prev))
ASSERT(0);
5413 ASSERT(prev.br_state == XFS_EXT_NORM);
5414 ASSERT(!isnullstartblock(prev.br_startblock));
5415 ASSERT(del.br_startblock ==
5416 prev.br_startblock + prev.br_blockcount);
5417 if (prev.br_startoff < start) {
5418 mod = start - prev.br_startoff;
5419 prev.br_blockcount -= mod;
5420 prev.br_startblock += mod;
5421 prev.br_startoff = start;
}
5423 prev.br_state = XFS_EXT_UNWRITTEN;
5424 error = xfs_bmap_add_extent_unwritten_real(tp,
5425 ip, whichfork, &icur, &cur,
&prev, &logflags);
if (error)
goto error0;
goto nodelete;
} else {
5431 ASSERT(del.br_state == XFS_EXT_NORM);
5432 del.br_state = XFS_EXT_UNWRITTEN;
5433 error = xfs_bmap_add_extent_unwritten_real(tp,
5434 ip, whichfork, &icur, &cur,
&del, &logflags);
if (error)
goto error0;
goto nodelete;
}
}

delete:
if (wasdel) {
5444 error = xfs_bmap_del_extent_delay(ip, whichfork, &icur,
&got, &del);
} else {
5447 error = xfs_bmap_del_extent_real(ip, tp, &icur, cur,
5448 &del, &tmp_logflags, whichfork,
flags);
5450 logflags |= tmp_logflags;
}

if (error)
goto error0;
5456 max_len -= del.br_blockcount;
5457 end = del.br_startoff - 1;
nodelete:
/*
5460 * If not done go on to the next (previous) record.
*/
5462 if (end != (xfs_fileoff_t)-1 && end >= start) {
5463 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5464 (got.br_startoff > end &&
5465 !xfs_iext_prev_extent(ifp, &icur, &got))) {
done = true;
break;
}
extno++;
}
}
5472 if (done || end == (xfs_fileoff_t)-1 || end < start)
*rlen = 0;
else
5475 *rlen = end - start + 1;
5478 * Convert to a btree if necessary.
5480 if (xfs_bmap_needs_btree(ip, whichfork)) {
5481 ASSERT(cur == NULL);
5482 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
5483 &tmp_logflags, whichfork);
5484 logflags |= tmp_logflags;
} else {
5486 error = xfs_bmap_btree_to_extents(tp, ip, cur, &logflags,
whichfork);
}
/*
5492 * Log everything. Do this after conversion, there's no point in
5493 * logging the extent records if we've converted to btree format.
*/
5495 if ((logflags & xfs_ilog_fext(whichfork)) &&
5496 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
5497 logflags &= ~xfs_ilog_fext(whichfork);
5498 else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
5499 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
5500 logflags &= ~xfs_ilog_fbroot(whichfork);
error0:
/*
5502 * Log inode even in the error case, if the transaction
5503 * is dirty we'll need to shut down the filesystem.
*/
if (logflags)
5506 xfs_trans_log_inode(tp, ip, logflags);
if (cur) {
if (!error)
5509 cur->bc_private.b.allocated = 0;
5510 xfs_btree_del_cursor(cur, error);
}
return error;
}
5515 /* Unmap a range of a file. */
int
xfs_bunmapi(
struct xfs_trans *tp,
5519 struct xfs_inode *ip,
xfs_fileoff_t bno,
xfs_filblks_t len,
int flags,
xfs_extnum_t nexts,
int *done)
{
int error;

5528 error = __xfs_bunmapi(tp, ip, bno, &len, flags, nexts);
*done = (len == 0);
return error;
}
5534 * Determine whether an extent shift can be accomplished by a merge with the
5535 * extent that precedes the target hole of the shift.
STATIC bool
xfs_bmse_can_merge(
5539 struct xfs_bmbt_irec *left, /* preceding extent */
5540 struct xfs_bmbt_irec *got, /* current extent to shift */
5541 xfs_fileoff_t shift) /* shift fsb */
5543 xfs_fileoff_t startoff;
5545 startoff = got->br_startoff - shift;
5548 * The extent, once shifted, must be adjacent in-file and on-disk with
5549 * the preceding extent.
5551 if ((left->br_startoff + left->br_blockcount != startoff) ||
5552 (left->br_startblock + left->br_blockcount != got->br_startblock) ||
5553 (left->br_state != got->br_state) ||
5554 (left->br_blockcount + got->br_blockcount > MAXEXTLEN))
return false;

return true;
}
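/*
 * Illustrative example (annotation): an extent at file offset 100
 * shifted left by 10 lands at offset 90, so it can merge with a left
 * neighbour covering [80, 90) provided the two are also physically
 * contiguous on disk, share the same written/unwritten state, and the
 * combined length stays within MAXEXTLEN.
 */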
5561 * A bmap extent shift adjusts the file offset of an extent to fill a preceding
5562 * hole in the file. If an extent shift would result in the extent being fully
5563 * adjacent to the extent that currently precedes the hole, we can merge with
5564 * the preceding extent rather than do the shift.
5566 * This function assumes the caller has verified a shift-by-merge is possible
5567 * with the provided extents via xfs_bmse_can_merge().
STATIC int
xfs_bmse_merge(
5571 struct xfs_trans *tp,
5572 struct xfs_inode *ip,
int whichfork,
5574 xfs_fileoff_t shift, /* shift fsb */
5575 struct xfs_iext_cursor *icur,
5576 struct xfs_bmbt_irec *got, /* extent to shift */
5577 struct xfs_bmbt_irec *left, /* preceding extent */
5578 struct xfs_btree_cur *cur,
5579 int *logflags) /* output */
5581 struct xfs_bmbt_irec new;
5582 xfs_filblks_t blockcount;
5584 struct xfs_mount *mp = ip->i_mount;
5586 blockcount = left->br_blockcount + got->br_blockcount;
5588 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5589 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
5590 ASSERT(xfs_bmse_can_merge(left, got, shift));
5593 new.br_blockcount = blockcount;
/*
5596 * Update the on-disk extent count, the btree if necessary and log the
* inode.
*/
5599 XFS_IFORK_NEXT_SET(ip, whichfork,
5600 XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
5601 *logflags |= XFS_ILOG_CORE;
if (!cur) {
5603 *logflags |= XFS_ILOG_DEXT;
goto done;
}
5607 /* lookup and remove the extent to merge */
5608 error = xfs_bmbt_lookup_eq(cur, got, &i);
5611 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5613 error = xfs_btree_delete(cur, &i);
5616 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5618 /* lookup and update size of the previous extent */
5619 error = xfs_bmbt_lookup_eq(cur, left, &i);
5622 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5624 error = xfs_bmbt_update(cur, &new);
if (error)
return error;

done:
5629 xfs_iext_remove(ip, icur, 0);
5630 xfs_iext_prev(XFS_IFORK_PTR(ip, whichfork), icur);
5631 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5634 /* update reverse mapping. rmap functions merge the rmaps for us */
5635 error = xfs_rmap_unmap_extent(tp, ip, whichfork, got);
if (error)
return error;
5638 memcpy(&new, got, sizeof(new));
5639 new.br_startoff = left->br_startoff + left->br_blockcount;
5640 return xfs_rmap_map_extent(tp, ip, whichfork, &new);
static int
5644 xfs_bmap_shift_update_extent(
5645 struct xfs_trans *tp,
5646 struct xfs_inode *ip,
int whichfork,
5648 struct xfs_iext_cursor *icur,
5649 struct xfs_bmbt_irec *got,
5650 struct xfs_btree_cur *cur,
int *logflags,
5652 xfs_fileoff_t startoff)
{
5654 struct xfs_mount *mp = ip->i_mount;
5655 struct xfs_bmbt_irec prev = *got;
5658 *logflags |= XFS_ILOG_CORE;
5660 got->br_startoff = startoff;
if (cur) {
5663 error = xfs_bmbt_lookup_eq(cur, &prev, &i);
if (error)
return error;
5666 XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
5668 error = xfs_bmbt_update(cur, got);
if (error)
return error;
} else {
5672 *logflags |= XFS_ILOG_DEXT;
}
5675 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), icur,
5678 /* update reverse mapping */
5679 error = xfs_rmap_unmap_extent(tp, ip, whichfork, &prev);
if (error)
return error;
5682 return xfs_rmap_map_extent(tp, ip, whichfork, got);
int
5686 xfs_bmap_collapse_extents(
5687 struct xfs_trans *tp,
5688 struct xfs_inode *ip,
5689 xfs_fileoff_t *next_fsb,
5690 xfs_fileoff_t offset_shift_fsb,
bool *done)
{
5693 int whichfork = XFS_DATA_FORK;
5694 struct xfs_mount *mp = ip->i_mount;
5695 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5696 struct xfs_btree_cur *cur = NULL;
5697 struct xfs_bmbt_irec got, prev;
5698 struct xfs_iext_cursor icur;
5699 xfs_fileoff_t new_startoff;
5703 if (unlikely(XFS_TEST_ERROR(
5704 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5705 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5706 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5707 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5708 return -EFSCORRUPTED;
5711 if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
5714 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5716 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5717 error = xfs_iread_extents(tp, ip, whichfork);
if (error)
return error;
}
5722 if (ifp->if_flags & XFS_IFBROOT) {
5723 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5724 cur->bc_private.b.flags = 0;
}
5727 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
*done = true;
goto del_cursor;
}
5731 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
del_cursor);
5734 new_startoff = got.br_startoff - offset_shift_fsb;
5735 if (xfs_iext_peek_prev_extent(ifp, &icur, &prev)) {
5736 if (new_startoff < prev.br_startoff + prev.br_blockcount) {
error = -EINVAL;
goto del_cursor;
}
5741 if (xfs_bmse_can_merge(&prev, &got, offset_shift_fsb)) {
5742 error = xfs_bmse_merge(tp, ip, whichfork,
5743 offset_shift_fsb, &icur, &got, &prev,
cur, &logflags);
if (error)
goto del_cursor;
goto done;
}
} else {
5750 if (got.br_startoff < offset_shift_fsb) {
error = -EINVAL;
goto del_cursor;
}
}
5756 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5757 cur, &logflags, new_startoff);
if (error)
goto del_cursor;

done:
5762 if (!xfs_iext_next_extent(ifp, &icur, &got)) {
*done = true;
goto del_cursor;
}
5767 *next_fsb = got.br_startoff;
del_cursor:
if (cur)
5770 xfs_btree_del_cursor(cur, error);
if (logflags)
5772 xfs_trans_log_inode(tp, ip, logflags);
return error;
}
5776 /* Make sure we won't be right-shifting an extent past the maximum bound. */
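/*
 * Note (annotation): br_startoff lives in a 54-bit on-disk field, so
 * the check below masks the shifted offset with BMBT_STARTOFF_MASK; a
 * result smaller than the original startoff means the shift would wrap
 * past the largest representable file offset.
 */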
int
5778 xfs_bmap_can_insert_extents(
5779 struct xfs_inode *ip,
xfs_fileoff_t off,
5781 xfs_fileoff_t shift)
{
5783 struct xfs_bmbt_irec got;
5787 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
5789 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
5792 xfs_ilock(ip, XFS_ILOCK_EXCL);
5793 error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
5794 if (!error && !is_empty && got.br_startoff >= off &&
5795 ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
error = -EINVAL;
5797 xfs_iunlock(ip, XFS_ILOCK_EXCL);

return error;
}
int
5803 xfs_bmap_insert_extents(
5804 struct xfs_trans *tp,
5805 struct xfs_inode *ip,
5806 xfs_fileoff_t *next_fsb,
5807 xfs_fileoff_t offset_shift_fsb,
bool *done,
5809 xfs_fileoff_t stop_fsb)
{
5811 int whichfork = XFS_DATA_FORK;
5812 struct xfs_mount *mp = ip->i_mount;
5813 struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, whichfork);
5814 struct xfs_btree_cur *cur = NULL;
5815 struct xfs_bmbt_irec got, next;
5816 struct xfs_iext_cursor icur;
5817 xfs_fileoff_t new_startoff;
5821 if (unlikely(XFS_TEST_ERROR(
5822 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5823 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5824 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5825 XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
5826 return -EFSCORRUPTED;
5829 if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
5832 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL));
5834 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5835 error = xfs_iread_extents(tp, ip, whichfork);
if (error)
return error;
}
5840 if (ifp->if_flags & XFS_IFBROOT) {
5841 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5842 cur->bc_private.b.flags = 0;
}
5845 if (*next_fsb == NULLFSBLOCK) {
5846 xfs_iext_last(ifp, &icur);
5847 if (!xfs_iext_get_extent(ifp, &icur, &got) ||
5848 stop_fsb > got.br_startoff) {
*done = true;
goto del_cursor;
}
} else {
5853 if (!xfs_iext_lookup_extent(ip, ifp, *next_fsb, &icur, &got)) {
*done = true;
goto del_cursor;
}
}
5858 XFS_WANT_CORRUPTED_GOTO(mp, !isnullstartblock(got.br_startblock),
del_cursor);

5861 if (stop_fsb >= got.br_startoff + got.br_blockcount) {
error = -EIO;
goto del_cursor;
}
5866 new_startoff = got.br_startoff + offset_shift_fsb;
5867 if (xfs_iext_peek_next_extent(ifp, &icur, &next)) {
5868 if (new_startoff + got.br_blockcount > next.br_startoff) {
error = -EINVAL;
goto del_cursor;
}
5874 * Unlike a left shift (which involves a hole punch), a right
5875 * shift does not modify extent neighbors in any way. We should
5876 * never find mergeable extents in this scenario. Check anyway
5877 * and warn if we encounter two extents that could be one.
5879 if (xfs_bmse_can_merge(&got, &next, offset_shift_fsb))
WARN_ON_ONCE(1);
}
5883 error = xfs_bmap_shift_update_extent(tp, ip, whichfork, &icur, &got,
5884 cur, &logflags, new_startoff);
if (error)
goto del_cursor;
5888 if (!xfs_iext_prev_extent(ifp, &icur, &got) ||
5889 stop_fsb >= got.br_startoff + got.br_blockcount) {
*done = true;
goto del_cursor;
}
5894 *next_fsb = got.br_startoff;
del_cursor:
if (cur)
5897 xfs_btree_del_cursor(cur, error);
if (logflags)
5899 xfs_trans_log_inode(tp, ip, logflags);
return error;
}
/*
5904 * Splits an extent in two at the block split_fsb, so that split_fsb
5905 * becomes the first block of the new (second) extent. If split_fsb
5906 * lies in a hole or at the first block of an existing extent, there is
5907 * nothing to do and we return 0.
*/
STATIC int
5910 xfs_bmap_split_extent_at(
5911 struct xfs_trans *tp,
5912 struct xfs_inode *ip,
5913 xfs_fileoff_t split_fsb)
5915 int whichfork = XFS_DATA_FORK;
5916 struct xfs_btree_cur *cur = NULL;
5917 struct xfs_bmbt_irec got;
5918 struct xfs_bmbt_irec new; /* split extent */
5919 struct xfs_mount *mp = ip->i_mount;
5920 struct xfs_ifork *ifp;
5921 xfs_fsblock_t gotblkcnt; /* new block count for got */
5922 struct xfs_iext_cursor icur;
5927 if (unlikely(XFS_TEST_ERROR(
5928 (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS &&
5929 XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE),
5930 mp, XFS_ERRTAG_BMAPIFORMAT))) {
5931 XFS_ERROR_REPORT("xfs_bmap_split_extent_at",
5932 XFS_ERRLEVEL_LOW, mp);
5933 return -EFSCORRUPTED;
5936 if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
5939 ifp = XFS_IFORK_PTR(ip, whichfork);
5940 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
5941 /* Read in all the extents */
5942 error = xfs_iread_extents(tp, ip, whichfork);
if (error)
return error;
}
/*
5948 * If there are no extents, or if split_fsb lies in a hole, we are done.
*/
5950 if (!xfs_iext_lookup_extent(ip, ifp, split_fsb, &icur, &got) ||
5951 got.br_startoff >= split_fsb)
return 0;
5954 gotblkcnt = split_fsb - got.br_startoff;
5955 new.br_startoff = split_fsb;
5956 new.br_startblock = got.br_startblock + gotblkcnt;
5957 new.br_blockcount = got.br_blockcount - gotblkcnt;
5958 new.br_state = got.br_state;
5960 if (ifp->if_flags & XFS_IFBROOT) {
5961 cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
5962 cur->bc_private.b.flags = 0;
5963 error = xfs_bmbt_lookup_eq(cur, &got, &i);
if (error)
goto del_cursor;
5966 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
}
5969 got.br_blockcount = gotblkcnt;
5970 xfs_iext_update_extent(ip, xfs_bmap_fork_to_state(whichfork), &icur,
5973 logflags = XFS_ILOG_CORE;
if (cur) {
5975 error = xfs_bmbt_update(cur, &got);
if (error)
goto del_cursor;
} else
5979 logflags |= XFS_ILOG_DEXT;
5981 /* Add new extent */
5982 xfs_iext_next(ifp, &icur);
5983 xfs_iext_insert(ip, &icur, &new, 0);
5984 XFS_IFORK_NEXT_SET(ip, whichfork,
5985 XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur) {
5988 error = xfs_bmbt_lookup_eq(cur, &new, &i);
if (error)
goto del_cursor;
5991 XFS_WANT_CORRUPTED_GOTO(mp, i == 0, del_cursor);
5992 error = xfs_btree_insert(cur, &i);
if (error)
goto del_cursor;
5995 XFS_WANT_CORRUPTED_GOTO(mp, i == 1, del_cursor);
}
5999 * Convert to a btree if necessary.
6001 if (xfs_bmap_needs_btree(ip, whichfork)) {
6002 int tmp_logflags; /* partial log flag return val */
6004 ASSERT(cur == NULL);
6005 error = xfs_bmap_extents_to_btree(tp, ip, &cur, 0,
6006 &tmp_logflags, whichfork);
6007 logflags |= tmp_logflags;
}

del_cursor:
if (cur) {
6012 cur->bc_private.b.allocated = 0;
6013 xfs_btree_del_cursor(cur, error);
}
if (logflags)
6017 xfs_trans_log_inode(tp, ip, logflags);
return error;
}
int
6022 xfs_bmap_split_extent(
6023 struct xfs_inode *ip,
6024 xfs_fileoff_t split_fsb)
6026 struct xfs_mount *mp = ip->i_mount;
6027 struct xfs_trans *tp;
6030 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
6031 XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
if (error)
return error;
6035 xfs_ilock(ip, XFS_ILOCK_EXCL);
6036 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6038 error = xfs_bmap_split_extent_at(tp, ip, split_fsb);
if (error)
goto out;
6042 return xfs_trans_commit(tp);
out:
6045 xfs_trans_cancel(tp);
return error;
}
6049 /* Deferred mapping is only for real extents in the data fork. */
static bool
6051 xfs_bmap_is_update_needed(
6052 struct xfs_bmbt_irec *bmap)
6054 return bmap->br_startblock != HOLESTARTBLOCK &&
6055 bmap->br_startblock != DELAYSTARTBLOCK;
6058 /* Record a bmap intent. */
static int
__xfs_bmap_add(
6061 struct xfs_trans *tp,
6062 enum xfs_bmap_intent_type type,
6063 struct xfs_inode *ip,
int whichfork,
6065 struct xfs_bmbt_irec *bmap)
{
6067 struct xfs_bmap_intent *bi;
6069 trace_xfs_bmap_defer(tp->t_mountp,
6070 XFS_FSB_TO_AGNO(tp->t_mountp, bmap->br_startblock),
6072 XFS_FSB_TO_AGBNO(tp->t_mountp, bmap->br_startblock),
6073 ip->i_ino, whichfork,
6075 bmap->br_blockcount,
6078 bi = kmem_alloc(sizeof(struct xfs_bmap_intent), KM_SLEEP | KM_NOFS);
6079 INIT_LIST_HEAD(&bi->bi_list);
6082 bi->bi_whichfork = whichfork;
6083 bi->bi_bmap = *bmap;
6085 xfs_defer_add(tp, XFS_DEFER_OPS_TYPE_BMAP, &bi->bi_list);
return 0;
}
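/*
 * Note (annotation): the intent is only queued here; the actual work
 * happens when the deferred op is processed, ending up in
 * xfs_bmap_finish_one() below.
 */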
6089 /* Map an extent into a file. */
int
6091 xfs_bmap_map_extent(
6092 struct xfs_trans *tp,
6093 struct xfs_inode *ip,
6094 struct xfs_bmbt_irec *PREV)
6096 if (!xfs_bmap_is_update_needed(PREV))
return 0;
6099 return __xfs_bmap_add(tp, XFS_BMAP_MAP, ip, XFS_DATA_FORK, PREV);
6102 /* Unmap an extent out of a file. */
int
6104 xfs_bmap_unmap_extent(
6105 struct xfs_trans *tp,
6106 struct xfs_inode *ip,
6107 struct xfs_bmbt_irec *PREV)
6109 if (!xfs_bmap_is_update_needed(PREV))
return 0;
6112 return __xfs_bmap_add(tp, XFS_BMAP_UNMAP, ip, XFS_DATA_FORK, PREV);
6116 * Process one of the deferred bmap operations. We pass back the
6117 * btree cursor to maintain our lock on the bmapbt between calls.
int
6120 xfs_bmap_finish_one(
6121 struct xfs_trans *tp,
6122 struct xfs_inode *ip,
6123 enum xfs_bmap_intent_type type,
int whichfork,
6125 xfs_fileoff_t startoff,
6126 xfs_fsblock_t startblock,
6127 xfs_filblks_t *blockcount,
xfs_exntst_t state)
{
int error = 0;
6132 ASSERT(tp->t_firstblock == NULLFSBLOCK);
6134 trace_xfs_bmap_deferred(tp->t_mountp,
6135 XFS_FSB_TO_AGNO(tp->t_mountp, startblock), type,
6136 XFS_FSB_TO_AGBNO(tp->t_mountp, startblock),
6137 ip->i_ino, whichfork, startoff, *blockcount, state);
6139 if (WARN_ON_ONCE(whichfork != XFS_DATA_FORK))
6140 return -EFSCORRUPTED;
6142 if (XFS_TEST_ERROR(false, tp->t_mountp,
6143 XFS_ERRTAG_BMAP_FINISH_ONE))
return -EIO;

switch (type) {
case XFS_BMAP_MAP:
6148 error = xfs_bmapi_remap(tp, ip, startoff, *blockcount,
startblock, 0);
*blockcount = 0;
break;
6152 case XFS_BMAP_UNMAP:
6153 error = __xfs_bunmapi(tp, ip, startoff, blockcount,
6154 XFS_BMAPI_REMAP, 1);
break;
default:
ASSERT(0);
6158 error = -EFSCORRUPTED;
}

return error;
}
6164 /* Check that an inode's extent does not have invalid flags or bad ranges. */
xfs_failaddr_t
6166 xfs_bmap_validate_extent(
6167 struct xfs_inode *ip,
int whichfork,
6169 struct xfs_bmbt_irec *irec)
{
6171 struct xfs_mount *mp = ip->i_mount;
6172 xfs_fsblock_t endfsb;
6175 isrt = XFS_IS_REALTIME_INODE(ip);
6176 endfsb = irec->br_startblock + irec->br_blockcount - 1;
if (isrt) {
6178 if (!xfs_verify_rtbno(mp, irec->br_startblock))
6179 return __this_address;
6180 if (!xfs_verify_rtbno(mp, endfsb))
6181 return __this_address;
} else {
6183 if (!xfs_verify_fsbno(mp, irec->br_startblock))
6184 return __this_address;
6185 if (!xfs_verify_fsbno(mp, endfsb))
6186 return __this_address;
6187 if (XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
6188 XFS_FSB_TO_AGNO(mp, endfsb))
6189 return __this_address;
}
6191 if (irec->br_state != XFS_EXT_NORM && whichfork != XFS_DATA_FORK)
6192 return __this_address;
return NULL;
}