// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2012 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"
/* Kernel only BMAP related definitions and functions */

/*
 * Convert the given file system block to a disk block.  We have to treat it
 * differently based on whether the file is a real time file or not, because the
 * bmap code does.
 */
xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
        if (XFS_IS_REALTIME_INODE(ip))
                return XFS_FSB_TO_BB(ip->i_mount, fsb);
        return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
}
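
/*
 * A worked example, not from the original source: on a filesystem with 4k
 * blocks (sb_blocklog == 12), XFS_FSB_TO_BB() shifts by (12 - 9) == 3, so
 * realtime fsb 100 maps to daddr 800.  Data device fsbs are sparse (the AG
 * number lives in the high bits), which is why they must go through
 * XFS_FSB_TO_DADDR() rather than a plain shift.
 */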

/*
 * Routine to zero an extent on disk allocated to the specific inode.
 *
 * The VFS functions take a linearised filesystem block offset, so we have to
 * convert the sparse xfs fsb to the right format first.
 * VFS types are real funky, too.
 */
int
xfs_zero_extent(
        struct xfs_inode        *ip,
        xfs_fsblock_t           start_fsb,
        xfs_off_t               count_fsb)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_buftarg      *target = xfs_inode_buftarg(ip);
        xfs_daddr_t             sector = xfs_fsb_to_db(ip, start_fsb);
        sector_t                block = XFS_BB_TO_FSBT(mp, sector);

        return blkdev_issue_zeroout(target->bt_bdev,
                block << (mp->m_super->s_blocksize_bits - 9),
                count_fsb << (mp->m_super->s_blocksize_bits - 9),
                GFP_NOFS, 0);
}
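
/*
 * Illustrative numbers only (not from the original source): with 4k blocks,
 * s_blocksize_bits - 9 == 3, so zeroing 16 fs blocks starting at fs block 50
 * issues a zeroout of 16 << 3 == 128 sectors beginning at sector
 * 50 << 3 == 400 on the underlying block device.
 */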

#ifdef CONFIG_XFS_RT
int
xfs_bmap_rtalloc(
        struct xfs_bmalloca     *ap)    /* bmap alloc argument struct */
{
        int             error;          /* error return value */
        xfs_mount_t     *mp;            /* mount point structure */
        xfs_extlen_t    prod = 0;       /* product factor for allocators */
        xfs_extlen_t    mod = 0;        /* extent offset remainder */
        xfs_extlen_t    ralen = 0;      /* realtime allocation length */
        xfs_extlen_t    align;          /* minimum allocation alignment */
        xfs_rtblock_t   rtb;

        mp = ap->ip->i_mount;
        align = xfs_get_extsz_hint(ap->ip);
        prod = align / mp->m_sb.sb_rextsize;
        error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
                                        align, 1, ap->eof, 0,
                                        ap->conv, &ap->offset, &ap->length);
        if (error)
                return error;
        ASSERT(ap->length);
        ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);

        /*
         * If the offset & length are not perfectly aligned
         * then kill prod, it will just get us in trouble.
         */
        div_u64_rem(ap->offset, align, &mod);
        if (mod || ap->length % align)
                prod = 1;
        /*
         * Set ralen to be the actual requested length in rtextents.
         */
        ralen = ap->length / mp->m_sb.sb_rextsize;
        /*
         * If the old value was close enough to MAXEXTLEN that
         * we rounded up to it, cut it back so it's valid again.
         * Note that if it's a really large request (bigger than
         * MAXEXTLEN), we don't hear about that number, and can't
         * adjust the starting point to match it.
         */
        if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
                ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
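
        /*
         * Worked example with hypothetical numbers: MAXEXTLEN is
         * (1 << 21) - 1 == 2097151 blocks, so with sb_rextsize == 16 a
         * request rounded up to 2097152 blocks trips the check above and
         * is cut back to 2097151 / 16 == 131071 rtextents (2097136 blocks).
         */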

        /*
         * Lock out modifications to both the RT bitmap and summary inodes.
         */
        xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
        xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
        xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
        xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);

        /*
         * If it's an allocation to an empty file at offset 0,
         * pick an extent that will space things out in the rt area.
         */
        if (ap->eof && ap->offset == 0) {
                xfs_rtblock_t   rtx;    /* realtime extent number */

                error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
                if (error)
                        return error;
                ap->blkno = rtx * mp->m_sb.sb_rextsize;
        } else {
                ap->blkno = 0;
        }

        xfs_bmap_adjacent(ap);

        /*
         * Realtime allocation, done through xfs_rtallocate_extent.
         */
        do_div(ap->blkno, mp->m_sb.sb_rextsize);
        rtb = ap->blkno;
        ap->length = ralen;
        error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
                        &ralen, ap->wasdel, prod, &rtb);
        if (error)
                return error;

        ap->blkno = rtb;
        if (ap->blkno != NULLFSBLOCK) {
                ap->blkno *= mp->m_sb.sb_rextsize;
                ralen *= mp->m_sb.sb_rextsize;
                ap->length = ralen;
                ap->ip->i_nblocks += ralen;
                xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
                if (ap->wasdel)
                        ap->ip->i_delayed_blks -= ralen;
                /*
                 * Adjust the disk quota also.  This was reserved
                 * earlier.
                 */
                xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
                        ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
                                        XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
        } else {
                ap->length = 0;
        }
        return 0;
}
#endif /* CONFIG_XFS_RT */

/*
 * Extent tree block counting routines.
 */

/*
 * Count leaf blocks given a range of extent records.  Delayed allocation
 * extents are not counted towards the totals.
 */
xfs_extnum_t
xfs_bmap_count_leaves(
        struct xfs_ifork        *ifp,
        xfs_filblks_t           *count)
{
        struct xfs_iext_cursor  icur;
        struct xfs_bmbt_irec    got;
        xfs_extnum_t            numrecs = 0;

        for_each_xfs_iext(ifp, &icur, &got) {
                if (!isnullstartblock(got.br_startblock)) {
                        *count += got.br_blockcount;
                        numrecs++;
                }
        }

        return numrecs;
}

/*
 * Count fsblocks of the given fork.  Delayed allocation extents are
 * not counted towards the totals.
 */
int
xfs_bmap_count_blocks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork,
        xfs_extnum_t            *nextents,
        xfs_filblks_t           *count)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp = XFS_IFORK_PTR(ip, whichfork);
        struct xfs_btree_cur    *cur;
        xfs_extlen_t            btblocks = 0;
        int                     error;

        *nextents = 0;
        *count = 0;

        if (!ifp)
                return 0;

        switch (ifp->if_format) {
        case XFS_DINODE_FMT_BTREE:
                error = xfs_iread_extents(tp, ip, whichfork);
                if (error)
                        return error;

                cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
                error = xfs_btree_count_blocks(cur, &btblocks);
                xfs_btree_del_cursor(cur, error);
                if (error)
                        return error;

                /*
                 * xfs_btree_count_blocks includes the root block contained in
                 * the inode fork in @btblocks, so subtract one because we're
                 * only interested in allocated disk blocks.
                 */
                *count += btblocks - 1;

                fallthrough;
        case XFS_DINODE_FMT_EXTENTS:
                *nextents = xfs_bmap_count_leaves(ifp, count);
                break;
        }

        return 0;
}

static int
xfs_getbmap_report_one(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,
        struct kgetbmap         *out,
        int64_t                 bmv_end,
        struct xfs_bmbt_irec    *got)
{
        struct kgetbmap         *p = out + bmv->bmv_entries;
        bool                    shared = false;
        int                     error;

        error = xfs_reflink_trim_around_shared(ip, got, &shared);
        if (error)
                return error;

        if (isnullstartblock(got->br_startblock) ||
            got->br_startblock == DELAYSTARTBLOCK) {
                /*
                 * Delalloc extents that start beyond EOF can occur due to
                 * speculative EOF allocation when the delalloc extent is larger
                 * than the largest freespace extent at conversion time.  These
                 * extents cannot be converted by data writeback, so can exist
                 * here even if we are not supposed to be finding delalloc
                 * extents.
                 */
                if (got->br_startoff < XFS_B_TO_FSB(ip->i_mount, XFS_ISIZE(ip)))
                        ASSERT((bmv->bmv_iflags & BMV_IF_DELALLOC) != 0);

                p->bmv_oflags |= BMV_OF_DELALLOC;
                p->bmv_block = -2;
        } else {
                p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
        }

        if (got->br_state == XFS_EXT_UNWRITTEN &&
            (bmv->bmv_iflags & BMV_IF_PREALLOC))
                p->bmv_oflags |= BMV_OF_PREALLOC;

        if (shared)
                p->bmv_oflags |= BMV_OF_SHARED;

        p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
        p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);

        bmv->bmv_offset = p->bmv_offset + p->bmv_length;
        bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
        bmv->bmv_entries++;
        return 0;
}

static void
xfs_getbmap_report_hole(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,
        struct kgetbmap         *out,
        int64_t                 bmv_end,
        xfs_fileoff_t           bno,
        xfs_fileoff_t           end)
{
        struct kgetbmap         *p = out + bmv->bmv_entries;

        if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
                return;

        p->bmv_block = -1;
        p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
        p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);

        bmv->bmv_offset = p->bmv_offset + p->bmv_length;
        bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
        bmv->bmv_entries++;
}
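
/*
 * Units note with example figures (not from the original source): getbmap
 * offsets and lengths are in 512-byte basic blocks, so on a 4k-block
 * filesystem a 10-block extent at file block 3 is reported as
 * bmv_offset == 24 and bmv_length == 80.
 */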

static inline bool
xfs_getbmap_full(
        struct getbmapx         *bmv)
{
        return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
}

static bool
xfs_getbmap_next_rec(
        struct xfs_bmbt_irec    *rec,
        xfs_fileoff_t           total_end)
{
        xfs_fileoff_t           end = rec->br_startoff + rec->br_blockcount;

        if (end == total_end)
                return false;

        rec->br_startoff += rec->br_blockcount;
        if (!isnullstartblock(rec->br_startblock) &&
            rec->br_startblock != DELAYSTARTBLOCK)
                rec->br_startblock += rec->br_blockcount;
        rec->br_blockcount = total_end - end;
        return true;
}
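
/*
 * A sketch of how this is used (hypothetical numbers): if a 100-block bmbt
 * record has its first 40 blocks trimmed off as shared by
 * xfs_reflink_trim_around_shared(), xfs_getbmap() reports those 40 blocks
 * as one getbmapx record, then this helper advances the record to the
 * remaining 60 blocks so the unshared tail is reported separately.
 */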

/*
 * Get inode's extents as described in bmv, and format for output.
 * Calls formatter to fill the user's buffer until all extents
 * are mapped, until the passed-in bmv->bmv_count slots have
 * been filled, or until the formatter short-circuits the loop,
 * if it is tracking filled-in extents on its own.
 */
int                                             /* error code */
xfs_getbmap(
        struct xfs_inode        *ip,
        struct getbmapx         *bmv,           /* user bmap structure */
        struct kgetbmap         *out)
{
        struct xfs_mount        *mp = ip->i_mount;
        int                     iflags = bmv->bmv_iflags;
        int                     whichfork, lock, error = 0;
        int64_t                 bmv_end, max_len;
        xfs_fileoff_t           bno, first_bno;
        struct xfs_ifork        *ifp;
        struct xfs_bmbt_irec    got, rec;
        xfs_filblks_t           len;
        struct xfs_iext_cursor  icur;

        if (bmv->bmv_iflags & ~BMV_IF_VALID)
                return -EINVAL;
#ifndef DEBUG
        /* Only allow CoW fork queries if we're debugging. */
        if (iflags & BMV_IF_COWFORK)
                return -EINVAL;
#endif
        if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
                return -EINVAL;

        if (bmv->bmv_length < -1)
                return -EINVAL;
        bmv->bmv_entries = 0;
        if (bmv->bmv_length == 0)
                return 0;

        if (iflags & BMV_IF_ATTRFORK)
                whichfork = XFS_ATTR_FORK;
        else if (iflags & BMV_IF_COWFORK)
                whichfork = XFS_COW_FORK;
        else
                whichfork = XFS_DATA_FORK;
        ifp = XFS_IFORK_PTR(ip, whichfork);

        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        switch (whichfork) {
        case XFS_ATTR_FORK:
                if (!XFS_IFORK_Q(ip))
                        goto out_unlock_iolock;

                max_len = 1LL << 32;
                lock = xfs_ilock_attr_map_shared(ip);
                break;
        case XFS_COW_FORK:
                /* No CoW fork? Just return */
                if (!ifp)
                        goto out_unlock_iolock;

                if (xfs_get_cowextsz_hint(ip))
                        max_len = mp->m_super->s_maxbytes;
                else
                        max_len = XFS_ISIZE(ip);

                lock = XFS_ILOCK_SHARED;
                xfs_ilock(ip, lock);
                break;
        case XFS_DATA_FORK:
                if (!(iflags & BMV_IF_DELALLOC) &&
                    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
                        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
                        if (error)
                                goto out_unlock_iolock;

                        /*
                         * Even after flushing the inode, there can still be
                         * delalloc blocks on the inode beyond EOF due to
                         * speculative preallocation.  These are not removed
                         * until the release function is called or the inode
                         * is inactivated.  Hence we cannot assert here that
                         * ip->i_delayed_blks == 0.
                         */
                }

                if (xfs_get_extsz_hint(ip) ||
                    (ip->i_diflags &
                     (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))
                        max_len = mp->m_super->s_maxbytes;
                else
                        max_len = XFS_ISIZE(ip);

                lock = xfs_ilock_data_map_shared(ip);
                break;
        }

        switch (ifp->if_format) {
        case XFS_DINODE_FMT_EXTENTS:
        case XFS_DINODE_FMT_BTREE:
                break;
        case XFS_DINODE_FMT_LOCAL:
                /* Local format inode forks report no extents. */
                goto out_unlock_ilock;
        default:
                error = -EINVAL;
                goto out_unlock_ilock;
        }

        if (bmv->bmv_length == -1) {
                max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
                bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
        }

        bmv_end = bmv->bmv_offset + bmv->bmv_length;

        first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
        len = XFS_BB_TO_FSB(mp, bmv->bmv_length);

        error = xfs_iread_extents(NULL, ip, whichfork);
        if (error)
                goto out_unlock_ilock;

        if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
                /*
                 * Report a whole-file hole if the delalloc flag is set to
                 * stay compatible with the old implementation.
                 */
                if (iflags & BMV_IF_DELALLOC)
                        xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
                                        XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                goto out_unlock_ilock;
        }

        while (!xfs_getbmap_full(bmv)) {
                xfs_trim_extent(&got, first_bno, len);

                /*
                 * Report an entry for a hole if this extent doesn't directly
                 * follow the previous one.
                 */
                if (got.br_startoff > bno) {
                        xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
                                        got.br_startoff);
                        if (xfs_getbmap_full(bmv))
                                break;
                }

                /*
                 * In order to report shared extents accurately, we report each
                 * distinct shared / unshared part of a single bmbt record with
                 * an individual getbmapx record.
                 */
                bno = got.br_startoff + got.br_blockcount;
                rec = got;
                do {
                        error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
                                        &rec);
                        if (error || xfs_getbmap_full(bmv))
                                goto out_unlock_ilock;
                } while (xfs_getbmap_next_rec(&rec, bno));

                if (!xfs_iext_next_extent(ifp, &icur, &got)) {
                        xfs_fileoff_t   end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

                        out[bmv->bmv_entries - 1].bmv_oflags |= BMV_OF_LAST;

                        if (whichfork != XFS_ATTR_FORK && bno < end &&
                            !xfs_getbmap_full(bmv)) {
                                xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
                                                bno, end);
                        }
                        break;
                }

                if (bno >= first_bno + len)
                        break;
        }

out_unlock_ilock:
        xfs_iunlock(ip, lock);
out_unlock_iolock:
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
        return error;
}

/*
 * Dead simple method of punching delayed allocation blocks from a range in
 * the inode.  This will always punch out both the start and end blocks, even
 * if the ranges only partially overlap them, so it is up to the caller to
 * ensure that partial blocks are not passed in.
 */
int
xfs_bmap_punch_delalloc_range(
        struct xfs_inode        *ip,
        xfs_fileoff_t           start_fsb,
        xfs_fileoff_t           length)
{
        struct xfs_ifork        *ifp = &ip->i_df;
        xfs_fileoff_t           end_fsb = start_fsb + length;
        struct xfs_bmbt_irec    got, del;
        struct xfs_iext_cursor  icur;
        int                     error = 0;

        ASSERT(!xfs_need_iread_extents(ifp));

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
                goto out_unlock;

        while (got.br_startoff + got.br_blockcount > start_fsb) {
                del = got;
                xfs_trim_extent(&del, start_fsb, length);

                /*
                 * A delete can push the cursor forward.  Step back to the
                 * previous extent on non-delalloc or extents outside the
                 * target range.
                 */
                if (!del.br_blockcount ||
                    !isnullstartblock(del.br_startblock)) {
                        if (!xfs_iext_prev_extent(ifp, &icur, &got))
                                break;
                        continue;
                }

                error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
                                                  &got, &del);
                if (error || !xfs_iext_get_extent(ifp, &icur, &got))
                        break;
        }

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * Test whether it is appropriate to check an inode for and free post EOF
 * blocks.  The 'force' parameter determines whether we should also consider
 * regular files that are marked preallocated or append-only.
 */
bool
xfs_can_free_eofblocks(
        struct xfs_inode        *ip,
        bool                    force)
{
        struct xfs_bmbt_irec    imap;
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           end_fsb;
        xfs_fileoff_t           last_fsb;
        int                     nimaps = 1;
        int                     error;

        /*
         * Caller must either hold the exclusive io lock; or be inactivating
         * the inode, which guarantees there are no other users of the inode.
         */
        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL) ||
               (VFS_I(ip)->i_state & I_FREEING));

        /* prealloc/delalloc exists only on regular files */
        if (!S_ISREG(VFS_I(ip)->i_mode))
                return false;

        /*
         * Zero sized files with no cached pages and delalloc blocks will not
         * have speculative prealloc/delalloc blocks to remove.
         */
        if (VFS_I(ip)->i_size == 0 &&
            VFS_I(ip)->i_mapping->nrpages == 0 &&
            ip->i_delayed_blks == 0)
                return false;

        /* If we haven't read in the extent list, then don't do it now. */
        if (xfs_need_iread_extents(&ip->i_df))
                return false;

        /*
         * Do not free real preallocated or append-only files unless the file
         * has delalloc blocks and we are forced to remove them.
         */
        if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
                if (!force || ip->i_delayed_blks == 0)
                        return false;

        /*
         * Do not try to free post-EOF blocks if EOF is beyond the end of the
         * range supported by the page cache, because the truncation will loop
         * forever.
         */
        end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (last_fsb <= end_fsb)
                return false;

        /*
         * Look up the mapping for the first block past EOF.  If we can't find
         * it, there's nothing to free.
         */
        xfs_ilock(ip, XFS_ILOCK_SHARED);
        error = xfs_bmapi_read(ip, end_fsb, last_fsb - end_fsb, &imap, &nimaps,
                        0);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        if (error || nimaps == 0)
                return false;

        /*
         * If there's a real mapping there or there are delayed allocation
         * reservations, then we have post-EOF blocks to try to free.
         */
        return imap.br_startblock != HOLESTARTBLOCK || ip->i_delayed_blks;
}

/*
 * This is called to free any blocks beyond eof.  The caller must hold
 * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
 * reference to the inode.
 */
int
xfs_free_eofblocks(
        struct xfs_inode        *ip)
{
        struct xfs_trans        *tp;
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        /* Attach the dquots to the inode up front. */
        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        /* Wait on dio to ensure i_size has settled. */
        inode_dio_wait(VFS_I(ip));

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
        if (error) {
                ASSERT(XFS_FORCED_SHUTDOWN(mp));
                return error;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        /*
         * Do not update the on-disk file size.  If we update the on-disk file
         * size and then the system crashes before the contents of the file are
         * flushed to disk then the files may be full of holes (ie NULL files
         * bug).
         */
        error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
                                XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
        if (error)
                goto err_cancel;

        error = xfs_trans_commit(tp);
        if (error)
                goto out_unlock;

        xfs_inode_clear_eofblocks_tag(ip);
        goto out_unlock;

err_cancel:
        /*
         * If we get an error at this point we simply don't
         * bother truncating the file.
         */
        xfs_trans_cancel(tp);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

int
xfs_alloc_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len,
        int                     alloc_type)
{
        xfs_mount_t             *mp = ip->i_mount;
        xfs_off_t               count;
        xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
        int                     nimaps;
        int                     rt;
        xfs_trans_t             *tp;
        xfs_bmbt_irec_t         imaps[1], *imapp;
        int                     error;

        trace_xfs_alloc_file_space(ip);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        if (len <= 0)
                return -EINVAL;

        rt = XFS_IS_REALTIME_INODE(ip);
        extsz = xfs_get_extsz_hint(ip);

        count = len;
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
        allocatesize_fsb = endoffset_fsb - startoffset_fsb;

        /*
         * Allocate file space until done or until there is an error
         */
        while (allocatesize_fsb && !error) {
                xfs_fileoff_t   s, e;
                unsigned int    dblocks, rblocks, resblks;

                /*
                 * Determine space reservations for data/realtime.
                 */
                if (unlikely(extsz)) {
                        s = startoffset_fsb;
                        do_div(s, extsz);
                        s *= extsz;
                        e = startoffset_fsb + allocatesize_fsb;
                        div_u64_rem(startoffset_fsb, extsz, &temp);
                        if (temp)
                                e += temp;
                        div_u64_rem(e, extsz, &temp);
                        if (temp)
                                e += extsz - temp;
                } else {
                        s = 0;
                        e = allocatesize_fsb;
                }

                /*
                 * The transaction reservation is limited to a 32-bit block
                 * count, hence we need to limit the number of blocks we are
                 * trying to reserve to avoid an overflow.  We can't allocate
                 * more than @nimaps extents, and an extent is limited on disk
                 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
                 */
                resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
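
                /*
                 * Example with made-up numbers: if e - s works out to
                 * 5,000,000 blocks but nimaps == 1, resblks is clamped to
                 * MAXEXTLEN == 2097151 blocks, keeping the reservation within
                 * what a single on-disk extent (21-bit length) can cover.
                 */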
                if (unlikely(rt)) {
                        dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
                        rblocks = resblks;
                } else {
                        dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
                        rblocks = 0;
                }

                /*
                 * Allocate and setup the transaction.
                 */
                error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
                                dblocks, rblocks, false, &tp);
                if (error)
                        break;

                error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
                                XFS_IEXT_ADD_NOSPLIT_CNT);
                if (error)
                        goto error;

                error = xfs_bmapi_write(tp, ip, startoffset_fsb,
                                        allocatesize_fsb, alloc_type, 0, imapp,
                                        &nimaps);
                if (error)
                        goto error;

                /*
                 * Complete the transaction
                 */
                error = xfs_trans_commit(tp);
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        break;

                allocated_fsb = imapp->br_blockcount;

                if (nimaps == 0) {
                        error = -ENOSPC;
                        break;
                }

                startoffset_fsb += allocated_fsb;
                allocatesize_fsb -= allocated_fsb;
        }

        return error;

error:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

static int
xfs_unmap_extent(
        struct xfs_inode        *ip,
        xfs_fileoff_t           startoffset_fsb,
        xfs_filblks_t           len_fsb,
        int                     *done)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        uint                    resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
        int                     error;

        error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
                        false, &tp);
        if (error)
                return error;

        error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
                        XFS_IEXT_PUNCH_HOLE_CNT);
        if (error)
                goto out_trans_cancel;

        error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
        if (error)
                goto out_trans_cancel;

        error = xfs_trans_commit(tp);
out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}

/* Caller must first wait for the completion of any pending DIOs if required. */
int
xfs_flush_unmap_range(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
        xfs_off_t               rounding, start, end;
        int                     error;

        rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
        start = round_down(offset, rounding);
        end = round_up(offset + len, rounding) - 1;
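
        /*
         * For instance (illustrative values only): with 1k filesystem blocks
         * and 4k pages, rounding is max(1024, 4096) == 4096, so a range of
         * [5000, 7000) is widened to [4096, 8191] before writeback and
         * invalidation.
         */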

        error = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (error)
                return error;
        truncate_pagecache_range(inode, start, end);
        return 0;
}

int
xfs_free_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        xfs_fileoff_t           startoffset_fsb;
        xfs_fileoff_t           endoffset_fsb;
        int                     done = 0, error;

        trace_xfs_free_file_space(ip);

        error = xfs_qm_dqattach(ip);
        if (error)
                return error;

        if (len <= 0)   /* if nothing being freed */
                return 0;

        startoffset_fsb = XFS_B_TO_FSB(mp, offset);
        endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);

        /* We can only free complete realtime extents. */
        if (XFS_IS_REALTIME_INODE(ip) && mp->m_sb.sb_rextsize > 1) {
                startoffset_fsb = roundup_64(startoffset_fsb,
                                             mp->m_sb.sb_rextsize);
                endoffset_fsb = rounddown_64(endoffset_fsb,
                                             mp->m_sb.sb_rextsize);
        }
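
        /*
         * Worked example (hypothetical geometry): with sb_rextsize == 16
         * blocks, a request spanning fs blocks [10, 100) is narrowed to
         * [16, 96) so that only whole realtime extents are unmapped; the
         * partial blocks are zeroed below instead.
         */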

        /*
         * Need to zero the stuff we're not freeing, on disk.
         */
        if (endoffset_fsb > startoffset_fsb) {
                while (!done) {
                        error = xfs_unmap_extent(ip, startoffset_fsb,
                                        endoffset_fsb - startoffset_fsb, &done);
                        if (error)
                                return error;
                }
        }

        /*
         * Now that we've unmapped all full blocks we'll have to zero out any
         * partial block at the beginning and/or end.  iomap_zero_range is smart
         * enough to skip any holes, including those we just created, but we
         * must take care not to zero beyond EOF and enlarge i_size.
         */
        if (offset >= XFS_ISIZE(ip))
                return 0;
        if (offset + len > XFS_ISIZE(ip))
                len = XFS_ISIZE(ip) - offset;
        error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
                        &xfs_buffered_write_iomap_ops);
        if (error)
                return error;

        /*
         * If we zeroed right up to EOF and EOF straddles a page boundary we
         * must make sure that the post-EOF area is also zeroed because the
         * page could be mmap'd and iomap_zero_range doesn't do that for us.
         * Writeback of the eof page will do this, albeit clumsily.
         */
        if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
                error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
                                round_down(offset + len, PAGE_SIZE), LLONG_MAX);
        }

        return error;
}

static int
xfs_prepare_shift(
        struct xfs_inode        *ip,
        loff_t                  offset)
{
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        /*
         * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
         * into the accessible region of the file.
         */
        if (xfs_can_free_eofblocks(ip, true)) {
                error = xfs_free_eofblocks(ip);
                if (error)
                        return error;
        }

        /*
         * Shift operations must stabilize the start block offset boundary along
         * with the full range of the operation.  If we don't, a COW writeback
         * completion could race with an insert, front merge with the start
         * extent (after split) during the shift and corrupt the file.  Start
         * with the block just prior to the start to stabilize the boundary.
         */
        offset = round_down(offset, 1 << mp->m_sb.sb_blocklog);
        if (offset)
                offset -= (1 << mp->m_sb.sb_blocklog);
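
        /*
         * Example (illustrative numbers): with 4k blocks, an offset of
         * 10000 rounds down to 8192 and then steps back one block to 4096,
         * so the flush below also stabilizes the extent straddling the
         * shift boundary.
         */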

        /*
         * Writeback and invalidate cache for the remainder of the file as we're
         * about to shift down every extent from offset to EOF.
         */
        error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
        if (error)
                return error;

        /*
         * Clean out anything hanging around in the cow fork now that
         * we've flushed all the dirty data out to disk to avoid having
         * CoW extents at the wrong offsets.
         */
        if (xfs_inode_has_cow_data(ip)) {
                error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
                                true);
                if (error)
                        return error;
        }

        return 0;
}

/*
 * xfs_collapse_file_space()
 *	This routine frees disk space and shifts extents for the given file.
 *	The first thing we do is free the data blocks in the specified range
 *	by calling xfs_free_file_space().  That also syncs dirty data and
 *	invalidates the page cache over the region on which the collapse
 *	range is working.  Then we shift the extent records to the left to
 *	cover the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_collapse_file_space(
        struct xfs_inode        *ip,
        xfs_off_t               offset,
        xfs_off_t               len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           next_fsb = XFS_B_TO_FSB(mp, offset + len);
        xfs_fileoff_t           shift_fsb = XFS_B_TO_FSB(mp, len);
        bool                    done = false;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

        trace_xfs_collapse_file_space(ip);

        error = xfs_free_file_space(ip, offset, len);
        if (error)
                return error;

        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        while (!done) {
                error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
                                &done);
                if (error)
                        goto out_trans_cancel;
                if (done)
                        break;

                /* finish any deferred frees and roll the transaction */
                error = xfs_defer_finish(&tp);
                if (error)
                        goto out_trans_cancel;
        }

        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * xfs_insert_file_space()
 *	This routine creates hole space by shifting extents for the given file.
 *	The first thing we do is sync dirty data and invalidate the page cache
 *	over the region on which the insert range is working.  Then we split
 *	the extent covering the given offset into two extents by calling
 *	xfs_bmap_split_extent, and shift all the extent records lying between
 *	[offset, last allocated extent] to the right to make room for the hole.
 * RETURNS:
 *	0 on success
 *	errno on error
 */
int
xfs_insert_file_space(
        struct xfs_inode        *ip,
        loff_t                  offset,
        loff_t                  len)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        int                     error;
        xfs_fileoff_t           stop_fsb = XFS_B_TO_FSB(mp, offset);
        xfs_fileoff_t           next_fsb = NULLFSBLOCK;
        xfs_fileoff_t           shift_fsb = XFS_B_TO_FSB(mp, len);
        bool                    done = false;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(xfs_isilocked(ip, XFS_MMAPLOCK_EXCL));

        trace_xfs_insert_file_space(ip);

        error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
        if (error)
                return error;

        error = xfs_prepare_shift(ip, offset);
        if (error)
                return error;

        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
                        XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
        if (error)
                return error;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        error = xfs_iext_count_may_overflow(ip, XFS_DATA_FORK,
                        XFS_IEXT_PUNCH_HOLE_CNT);
        if (error)
                goto out_trans_cancel;

        /*
         * The extent shifting code works on extent granularity.  So, if
         * stop_fsb is not the starting block of an extent, we need to split
         * the extent at stop_fsb.
         */
        error = xfs_bmap_split_extent(tp, ip, stop_fsb);
        if (error)
                goto out_trans_cancel;

        do {
                error = xfs_defer_finish(&tp);
                if (error)
                        goto out_trans_cancel;

                error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
                                &done, stop_fsb);
                if (error)
                        goto out_trans_cancel;
        } while (!done);

        error = xfs_trans_commit(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return error;
}

/*
 * We need to check that the format of the data fork in the temporary inode is
 * valid for the target inode before doing the swap.  This is not a problem with
 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
 * data fork depending on the space the attribute fork is taking so we can get
 * invalid formats on the target inode.
 *
 * E.g. target has space for 7 extents in extent format, temp inode only has
 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
 * btree, but when swapped it needs to be in extent format.  Hence we can't just
 * blindly swap data forks on attr2 filesystems.
 *
 * Note that we check the swap in both directions so that we don't end up with
 * a corrupt temporary inode, either.
 *
 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
 * inode will prevent this situation from occurring, so all we do here is
 * reject and log the attempt.  Basically we are putting the responsibility on
 * userspace to get this right.
 */
static int
xfs_swap_extents_check_format(
        struct xfs_inode        *ip,    /* target inode */
        struct xfs_inode        *tip)   /* tmp inode */
{
        struct xfs_ifork        *ifp = &ip->i_df;
        struct xfs_ifork        *tifp = &tip->i_df;

        /* User/group/project quota ids must match if quotas are enforced. */
        if (XFS_IS_QUOTA_ON(ip->i_mount) &&
            (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
             !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
             ip->i_projid != tip->i_projid))
                return -EINVAL;

        /* Should never get a local format */
        if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
            tifp->if_format == XFS_DINODE_FMT_LOCAL)
                return -EINVAL;

        /*
         * If the target inode has fewer extents than the temporary inode,
         * why did userspace call us?
         */
        if (ifp->if_nextents < tifp->if_nextents)
                return -EINVAL;

        /*
         * If we have to use the (expensive) rmap swap method, we can
         * handle any number of extents and any format.
         */
        if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
                return 0;

        /*
         * If the target inode is in extent form and the temp inode is in btree
         * form then we will end up with the target inode in the wrong format
         * as we already know there are fewer extents in the temp inode.
         */
        if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
            tifp->if_format == XFS_DINODE_FMT_BTREE)
                return -EINVAL;

        /* Check temp in extent form to max in target */
        if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
            tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                return -EINVAL;

        /* Check target in extent form to max in temp */
        if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
            ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                return -EINVAL;

        /*
         * If we are in a btree format, check that the temp root block will fit
         * in the target and that it has enough extents to be in btree format
         * in the target.
         *
         * Note that we have to be careful to allow btree->extent conversions
         * (a common defrag case) which will occur when the temp inode is in
         * extent format...
         */
        if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
                if (XFS_IFORK_Q(ip) &&
                    XFS_BMAP_BMDR_SPACE(tifp->if_broot) > XFS_IFORK_BOFF(ip))
                        return -EINVAL;
                if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                        return -EINVAL;
        }

        /* Reciprocal target->temp btree format checks */
        if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
                if (XFS_IFORK_Q(tip) &&
                    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
                        return -EINVAL;
                if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                        return -EINVAL;
        }

        return 0;
}

static int
xfs_swap_extent_flush(
        struct xfs_inode        *ip)
{
        int     error;

        error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
        if (error)
                return error;
        truncate_pagecache_range(VFS_I(ip), 0, -1);

        /* Verify O_DIRECT for ftmp */
        if (VFS_I(ip)->i_mapping->nrpages)
                return -EINVAL;
        return 0;
}

/*
 * Move extents from one file to another, when rmap is enabled.
 */
STATIC int
xfs_swap_extent_rmap(
        struct xfs_trans        **tpp,
        struct xfs_inode        *ip,
        struct xfs_inode        *tip)
{
        struct xfs_trans        *tp = *tpp;
        struct xfs_bmbt_irec    irec;
        struct xfs_bmbt_irec    uirec;
        struct xfs_bmbt_irec    tirec;
        xfs_fileoff_t           offset_fsb;
        xfs_fileoff_t           end_fsb;
        xfs_filblks_t           count_fsb;
        int                     error;
        xfs_filblks_t           ilen;
        xfs_filblks_t           rlen;
        int                     nimaps;
        uint64_t                tip_flags2;

        /*
         * If the source file has shared blocks, we must flag the donor
         * file as having shared blocks so that we get the shared-block
         * rmap functions when we go to fix up the rmaps.  The flags
         * will be switched for real later.
         */
        tip_flags2 = tip->i_diflags2;
        if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
                tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;

        offset_fsb = 0;
        end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
        count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);

        while (count_fsb) {
                /* Read extent from the donor file */
                nimaps = 1;
                error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
                                &nimaps, 0);
                if (error)
                        goto out;
                ASSERT(nimaps == 1);
                ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);

                trace_xfs_swap_extent_rmap_remap(tip, &tirec);
                ilen = tirec.br_blockcount;

                /* Unmap the old blocks in the source file. */
                while (tirec.br_blockcount) {
                        ASSERT(tp->t_firstblock == NULLFSBLOCK);
                        trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);

                        /* Read extent from the source file */
                        nimaps = 1;
                        error = xfs_bmapi_read(ip, tirec.br_startoff,
                                        tirec.br_blockcount, &irec,
                                        &nimaps, 0);
                        if (error)
                                goto out;
                        ASSERT(nimaps == 1);
                        ASSERT(tirec.br_startoff == irec.br_startoff);
                        trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);

                        /* Trim the extent. */
                        uirec = tirec;
                        uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
                                        tirec.br_blockcount,
                                        irec.br_blockcount);
                        trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);

                        if (xfs_bmap_is_real_extent(&uirec)) {
                                error = xfs_iext_count_may_overflow(ip,
                                                XFS_DATA_FORK,
                                                XFS_IEXT_SWAP_RMAP_CNT);
                                if (error)
                                        goto out;
                        }

                        if (xfs_bmap_is_real_extent(&irec)) {
                                error = xfs_iext_count_may_overflow(tip,
                                                XFS_DATA_FORK,
                                                XFS_IEXT_SWAP_RMAP_CNT);
                                if (error)
                                        goto out;
                        }

                        /* Remove the mapping from the donor file. */
                        xfs_bmap_unmap_extent(tp, tip, &uirec);

                        /* Remove the mapping from the source file. */
                        xfs_bmap_unmap_extent(tp, ip, &irec);

                        /* Map the donor file's blocks into the source file. */
                        xfs_bmap_map_extent(tp, ip, &uirec);

                        /* Map the source file's blocks into the donor file. */
                        xfs_bmap_map_extent(tp, tip, &irec);

                        error = xfs_defer_finish(tpp);
                        tp = *tpp;
                        if (error)
                                goto out;

                        tirec.br_startoff += rlen;
                        if (tirec.br_startblock != HOLESTARTBLOCK &&
                            tirec.br_startblock != DELAYSTARTBLOCK)
                                tirec.br_startblock += rlen;
                        tirec.br_blockcount -= rlen;
                }

                /* Roll on... */
                count_fsb -= ilen;
                offset_fsb += ilen;
        }

        tip->i_diflags2 = tip_flags2;
        return 0;

out:
        trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
        tip->i_diflags2 = tip_flags2;
        return error;
}

/* Swap the extents of two files by swapping data forks. */
STATIC int
xfs_swap_extent_forks(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        struct xfs_inode        *tip,
        int                     *src_log_flags,
        int                     *target_log_flags)
{
        xfs_filblks_t           aforkblks = 0;
        xfs_filblks_t           taforkblks = 0;
        xfs_extnum_t            junk;
        uint64_t                tmp;
        int                     error;

        /*
         * Count the number of extended attribute blocks
         */
        if (XFS_IFORK_Q(ip) && ip->i_afp->if_nextents > 0 &&
            ip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
                error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
                                &aforkblks);
                if (error)
                        return error;
        }
        if (XFS_IFORK_Q(tip) && tip->i_afp->if_nextents > 0 &&
            tip->i_afp->if_format != XFS_DINODE_FMT_LOCAL) {
                error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
                                &taforkblks);
                if (error)
                        return error;
        }

        /*
         * Btree format (v3) inodes have the inode number stamped in the bmbt
         * block headers.  We can't start changing the bmbt blocks until the
         * inode owner change is logged so recovery does the right thing in the
         * event of a crash.  Set the owner change log flags now and leave the
         * bmbt scan as the last step.
         */
        if (xfs_sb_version_has_v3inode(&ip->i_mount->m_sb)) {
                if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
                        (*target_log_flags) |= XFS_ILOG_DOWNER;
                if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
                        (*src_log_flags) |= XFS_ILOG_DOWNER;
        }

        /*
         * Swap the data forks of the inodes
         */
        swap(ip->i_df, tip->i_df);

        /*
         * Fix the on-disk inode values
         */
        tmp = (uint64_t)ip->i_nblocks;
        ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
        tip->i_nblocks = tmp + taforkblks - aforkblks;

        /*
         * The extents in the source inode could still contain speculative
         * preallocation beyond EOF (e.g. the file is open but not modified
         * while defrag is in progress).  In that case, we need to copy over the
         * number of delalloc blocks the data fork in the source inode is
         * tracking beyond EOF so that when the fork is truncated away when the
         * temporary inode is unlinked we don't underrun the i_delayed_blks
         * counter on that inode.
         */
        ASSERT(tip->i_delayed_blks == 0);
        tip->i_delayed_blks = ip->i_delayed_blks;
        ip->i_delayed_blks = 0;

        switch (ip->i_df.if_format) {
        case XFS_DINODE_FMT_EXTENTS:
                (*src_log_flags) |= XFS_ILOG_DEXT;
                break;
        case XFS_DINODE_FMT_BTREE:
                ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
                       (*src_log_flags & XFS_ILOG_DOWNER));
                (*src_log_flags) |= XFS_ILOG_DBROOT;
                break;
        }

        switch (tip->i_df.if_format) {
        case XFS_DINODE_FMT_EXTENTS:
                (*target_log_flags) |= XFS_ILOG_DEXT;
                break;
        case XFS_DINODE_FMT_BTREE:
                (*target_log_flags) |= XFS_ILOG_DBROOT;
                ASSERT(!xfs_sb_version_has_v3inode(&ip->i_mount->m_sb) ||
                       (*target_log_flags & XFS_ILOG_DOWNER));
                break;
        }

        return 0;
}

/*
 * Fix up the owners of the bmbt blocks to refer to the current inode.  The
 * change owner scan attempts to order all modified buffers in the current
 * transaction.  In the event of ordered buffer failure, the offending buffer is
 * physically logged as a fallback and the scan returns -EAGAIN.  We must roll
 * the transaction in this case to replenish the fallback log reservation and
 * restart the scan.  This process repeats until the scan completes.
 */
static int
xfs_swap_change_owner(
        struct xfs_trans        **tpp,
        struct xfs_inode        *ip,
        struct xfs_inode        *tmpip)
{
        int                     error;
        struct xfs_trans        *tp = *tpp;

        do {
                error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
                                              NULL);
                /* success or fatal error */
                if (error != -EAGAIN)
                        break;

                error = xfs_trans_roll(tpp);
                if (error)
                        break;
                tp = *tpp;

                /*
                 * Redirty both inodes so they can relog and keep the log tail
                 * moving forward.
                 */
                xfs_trans_ijoin(tp, ip, 0);
                xfs_trans_ijoin(tp, tmpip, 0);
                xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
                xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
        } while (true);

        return error;
}

int
xfs_swap_extents(
        struct xfs_inode        *ip,    /* target inode */
        struct xfs_inode        *tip,   /* tmp inode */
        struct xfs_swapext      *sxp)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp;
        struct xfs_bstat        *sbp = &sxp->sx_stat;
        int                     src_log_flags, target_log_flags;
        int                     error = 0;
        int                     lock_flags;
        uint64_t                f;
        int                     resblks = 0;
        unsigned int            flags = 0;

        /*
         * Lock the inodes against other IO, page faults and truncate to
         * begin with.  Then we can ensure the inodes are flushed and have no
         * page cache safely.  Once we have done this we can take the ilocks and
         * do the rest of the checks.
         */
        lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
        lock_flags = XFS_MMAPLOCK_EXCL;
        xfs_lock_two_inodes(ip, XFS_MMAPLOCK_EXCL, tip, XFS_MMAPLOCK_EXCL);

        /* Verify that both files have the same format */
        if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
                error = -EINVAL;
                goto out_unlock;
        }

        /* Verify both files are either real-time or non-realtime */
        if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
                error = -EINVAL;
                goto out_unlock;
        }

        error = xfs_qm_dqattach(ip);
        if (error)
                goto out_unlock;

        error = xfs_qm_dqattach(tip);
        if (error)
                goto out_unlock;

        error = xfs_swap_extent_flush(ip);
        if (error)
                goto out_unlock;
        error = xfs_swap_extent_flush(tip);
        if (error)
                goto out_unlock;

        if (xfs_inode_has_cow_data(tip)) {
                error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
                if (error)
                        goto out_unlock;
        }

        /*
         * Extent "swapping" with rmap requires a permanent reservation and
         * a block reservation because it's really just a remap operation
         * performed with log redo items!
         */
        if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
                int             w = XFS_DATA_FORK;
                uint32_t        ipnext = ip->i_df.if_nextents;
                uint32_t        tipnext = tip->i_df.if_nextents;

                /*
                 * Conceptually this shouldn't affect the shape of either bmbt,
                 * but since we atomically move extents one by one, we reserve
                 * enough space to rebuild both trees.
                 */
                resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
                resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);

                /*
                 * If either inode straddles a bmapbt block allocation boundary,
                 * the rmapbt algorithm triggers repeated allocs and frees as
                 * extents are remapped.  This can exhaust the block reservation
                 * prematurely and cause shutdown.  Return freed blocks to the
                 * transaction reservation to counter this behavior.
                 */
                flags |= XFS_TRANS_RES_FDBLKS;
        }
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
                                &tp);
        if (error)
                goto out_unlock;

        /*
         * Lock and join the inodes to the transaction so that transaction
         * commit or cancel will unlock the inodes from this point onwards.
         */
        xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
        lock_flags |= XFS_ILOCK_EXCL;
        xfs_trans_ijoin(tp, ip, 0);
        xfs_trans_ijoin(tp, tip, 0);

        /* Verify all data are being swapped */
        if (sxp->sx_offset != 0 ||
            sxp->sx_length != ip->i_disk_size ||
            sxp->sx_length != tip->i_disk_size) {
                error = -EFAULT;
                goto out_trans_cancel;
        }

        trace_xfs_swap_extent_before(ip, 0);
        trace_xfs_swap_extent_before(tip, 1);

        /* check inode formats now that data is flushed */
        error = xfs_swap_extents_check_format(ip, tip);
        if (error) {
                xfs_notice(mp,
                    "%s: inode 0x%llx format is incompatible for exchanging.",
                                __func__, ip->i_ino);
                goto out_trans_cancel;
        }

        /*
         * Compare the current change & modify times with that
         * passed in.  If they differ, we abort this swap.
         * This is the mechanism used to ensure the calling
         * process that the file was not changed out from
         * under it.
         */
        if ((sbp->bs_ctime.tv_sec != VFS_I(ip)->i_ctime.tv_sec) ||
            (sbp->bs_ctime.tv_nsec != VFS_I(ip)->i_ctime.tv_nsec) ||
            (sbp->bs_mtime.tv_sec != VFS_I(ip)->i_mtime.tv_sec) ||
            (sbp->bs_mtime.tv_nsec != VFS_I(ip)->i_mtime.tv_nsec)) {
                error = -EBUSY;
                goto out_trans_cancel;
        }

        /*
         * Note the trickiness in setting the log flags - we set the owner log
         * flag on the opposite inode (i.e. the inode we are setting the new
         * owner to be) because once we swap the forks and log that, log
         * recovery is going to see the fork as owned by the swapped inode,
         * not the pre-swapped inodes.
         */
        src_log_flags = XFS_ILOG_CORE;
        target_log_flags = XFS_ILOG_CORE;

        if (xfs_sb_version_hasrmapbt(&mp->m_sb))
                error = xfs_swap_extent_rmap(&tp, ip, tip);
        else
                error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
                                &target_log_flags);
        if (error)
                goto out_trans_cancel;

        /* Do we have to swap reflink flags? */
        if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
            (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
                f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
                ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
                ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
                tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
                tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
        }
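
        /*
         * To illustrate the three-step exchange above with example values:
         * if only ip has XFS_DIFLAG2_REFLINK set, f saves that bit from ip,
         * ip's bit is cleared and then re-set from tip (still clear), and
         * tip finally inherits the saved bit from f, leaving the flag
         * swapped between the two inodes.
         */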

        /* Swap the cow forks. */
        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
                ASSERT(!ip->i_cowfp ||
                       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
                ASSERT(!tip->i_cowfp ||
                       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);

                swap(ip->i_cowfp, tip->i_cowfp);

                if (ip->i_cowfp && ip->i_cowfp->if_bytes)
                        xfs_inode_set_cowblocks_tag(ip);
                else
                        xfs_inode_clear_cowblocks_tag(ip);
                if (tip->i_cowfp && tip->i_cowfp->if_bytes)
                        xfs_inode_set_cowblocks_tag(tip);
                else
                        xfs_inode_clear_cowblocks_tag(tip);
        }

        xfs_trans_log_inode(tp, ip, src_log_flags);
        xfs_trans_log_inode(tp, tip, target_log_flags);

        /*
         * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
         * have inode number owner values in the bmbt blocks that still refer to
         * the old inode.  Scan each bmbt to fix up the owner values with the
         * inode number of the current inode.
         */
        if (src_log_flags & XFS_ILOG_DOWNER) {
                error = xfs_swap_change_owner(&tp, ip, tip);
                if (error)
                        goto out_trans_cancel;
        }
        if (target_log_flags & XFS_ILOG_DOWNER) {
                error = xfs_swap_change_owner(&tp, tip, ip);
                if (error)
                        goto out_trans_cancel;
        }

        /*
         * If this is a synchronous mount, make sure that the
         * transaction goes to disk before returning to the user.
         */
        if (mp->m_flags & XFS_MOUNT_WSYNC)
                xfs_trans_set_sync(tp);

        error = xfs_trans_commit(tp);

        trace_xfs_swap_extent_after(ip, 0);
        trace_xfs_swap_extent_after(tip, 1);

out_unlock:
        xfs_iunlock(ip, lock_flags);
        xfs_iunlock(tip, lock_flags);
        unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
        return error;

out_trans_cancel:
        xfs_trans_cancel(tp);
        goto out_unlock;
}