1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_defer.h"
14 #include "xfs_inode.h"
15 #include "xfs_errortag.h"
16 #include "xfs_error.h"
17 #include "xfs_cksum.h"
18 #include "xfs_icache.h"
19 #include "xfs_trans.h"
20 #include "xfs_ialloc.h"
23 #include <linux/iversion.h>
26 * Check that none of the inodes in the buffer have a next
27 * unlinked field of 0.
39 j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
41 for (i = 0; i < j; i++) {
42 dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
43 if (!dip->di_next_unlinked) {
45 "Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
46 i, (long long)bp->b_bn);
53 xfs_dinode_good_version(
57 if (xfs_sb_version_hascrc(&mp->m_sb))
60 return version == 1 || version == 2;
64 * If we are doing readahead on an inode buffer, we might be in log recovery
65 * reading an inode allocation buffer that hasn't yet been replayed, and hence
66 * has not had the inode cores stamped into it. Hence for readahead, the buffer
67 * may be potentially invalid.
69 * If the readahead buffer is invalid, we need to mark it with an error and
70 * clear the DONE status of the buffer so that a followup read will re-read it
71 * from disk. We don't report the error otherwise to avoid warnings during log
72 recovery and we don't get unnecessary panics on debug kernels. We use EIO here
73 * because all we want to do is say readahead failed; there is no-one to report
74 * the error to, so this will distinguish it from a non-ra verifier failure.
75 Changes to this readahead error behaviour also need to be reflected in
76 * xfs_dquot_buf_readahead_verify().
83 struct xfs_mount *mp = bp->b_target->bt_mount;
89 * Validate the magic number and version of every inode in the buffer
91 agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
92 ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
93 for (i = 0; i < ni; i++) {
96 xfs_agino_t unlinked_ino;
98 dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
99 unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
100 di_ok = xfs_verify_magic16(bp, dip->di_magic) &&
101 xfs_dinode_good_version(mp, dip->di_version) &&
102 xfs_verify_agino_or_null(mp, agno, unlinked_ino);
103 if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
104 XFS_ERRTAG_ITOBP_INOTOBP))) {
106 bp->b_flags &= ~XBF_DONE;
107 xfs_buf_ioerror(bp, -EIO);
113 "bad inode magic/vsn daddr %lld #%d (magic=%x)",
114 (unsigned long long)bp->b_bn, i,
115 be16_to_cpu(dip->di_magic));
117 xfs_buf_verifier_error(bp, -EFSCORRUPTED,
118 __func__, dip, sizeof(*dip),
127 xfs_inode_buf_read_verify(
130 xfs_inode_buf_verify(bp, false);
134 xfs_inode_buf_readahead_verify(
137 xfs_inode_buf_verify(bp, true);
141 xfs_inode_buf_write_verify(
144 xfs_inode_buf_verify(bp, false);
147 const struct xfs_buf_ops xfs_inode_buf_ops = {
149 .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
150 cpu_to_be16(XFS_DINODE_MAGIC) },
151 .verify_read = xfs_inode_buf_read_verify,
152 .verify_write = xfs_inode_buf_write_verify,
155 const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
156 .name = "xfs_inode_ra",
157 .magic16 = { cpu_to_be16(XFS_DINODE_MAGIC),
158 cpu_to_be16(XFS_DINODE_MAGIC) },
159 .verify_read = xfs_inode_buf_readahead_verify,
160 .verify_write = xfs_inode_buf_write_verify,
165 * This routine is called to map an inode to the buffer containing the on-disk
166 * version of the inode. It returns a pointer to the buffer containing the
167 * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
168 * pointer to the on-disk inode within that buffer.
170 * If a non-zero error is returned, then the contents of bpp and dipp are
175 struct xfs_mount *mp,
176 struct xfs_trans *tp,
177 struct xfs_imap *imap,
178 struct xfs_dinode **dipp,
179 struct xfs_buf **bpp,
186 buf_flags |= XBF_UNMAPPED;
187 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
188 (int)imap->im_len, buf_flags, &bp,
191 if (error == -EAGAIN) {
192 ASSERT(buf_flags & XBF_TRYLOCK);
195 xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
201 *dipp = xfs_buf_offset(bp, imap->im_boffset);
207 struct xfs_inode *ip,
208 struct xfs_dinode *from)
210 struct xfs_icdinode *to = &ip->i_d;
211 struct inode *inode = VFS_I(ip);
215 * Convert v1 inodes immediately to v2 inode format as this is the
216 * minimum inode version format we support in the rest of the code.
218 to->di_version = from->di_version;
219 if (to->di_version == 1) {
220 set_nlink(inode, be16_to_cpu(from->di_onlink));
221 to->di_projid_lo = 0;
222 to->di_projid_hi = 0;
225 set_nlink(inode, be32_to_cpu(from->di_nlink));
226 to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
227 to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
230 to->di_format = from->di_format;
231 to->di_uid = be32_to_cpu(from->di_uid);
232 to->di_gid = be32_to_cpu(from->di_gid);
233 to->di_flushiter = be16_to_cpu(from->di_flushiter);
236 * Time is signed, so need to convert to signed 32 bit before
237 * storing in inode timestamp which may be 64 bit. Otherwise
238 * a time before epoch is converted to a time long after epoch
241 inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
242 inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
243 inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
244 inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
245 inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
246 inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
247 inode->i_generation = be32_to_cpu(from->di_gen);
248 inode->i_mode = be16_to_cpu(from->di_mode);
250 to->di_size = be64_to_cpu(from->di_size);
251 to->di_nblocks = be64_to_cpu(from->di_nblocks);
252 to->di_extsize = be32_to_cpu(from->di_extsize);
253 to->di_nextents = be32_to_cpu(from->di_nextents);
254 to->di_anextents = be16_to_cpu(from->di_anextents);
255 to->di_forkoff = from->di_forkoff;
256 to->di_aformat = from->di_aformat;
257 to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
258 to->di_dmstate = be16_to_cpu(from->di_dmstate);
259 to->di_flags = be16_to_cpu(from->di_flags);
261 if (to->di_version == 3) {
262 inode_set_iversion_queried(inode,
263 be64_to_cpu(from->di_changecount));
264 to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
265 to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
266 to->di_flags2 = be64_to_cpu(from->di_flags2);
267 to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
273 struct xfs_inode *ip,
274 struct xfs_dinode *to,
277 struct xfs_icdinode *from = &ip->i_d;
278 struct inode *inode = VFS_I(ip);
280 to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
283 to->di_version = from->di_version;
284 to->di_format = from->di_format;
285 to->di_uid = cpu_to_be32(from->di_uid);
286 to->di_gid = cpu_to_be32(from->di_gid);
287 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
288 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
290 memset(to->di_pad, 0, sizeof(to->di_pad));
291 to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
292 to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
293 to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
294 to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
295 to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
296 to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
297 to->di_nlink = cpu_to_be32(inode->i_nlink);
298 to->di_gen = cpu_to_be32(inode->i_generation);
299 to->di_mode = cpu_to_be16(inode->i_mode);
301 to->di_size = cpu_to_be64(from->di_size);
302 to->di_nblocks = cpu_to_be64(from->di_nblocks);
303 to->di_extsize = cpu_to_be32(from->di_extsize);
304 to->di_nextents = cpu_to_be32(from->di_nextents);
305 to->di_anextents = cpu_to_be16(from->di_anextents);
306 to->di_forkoff = from->di_forkoff;
307 to->di_aformat = from->di_aformat;
308 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
309 to->di_dmstate = cpu_to_be16(from->di_dmstate);
310 to->di_flags = cpu_to_be16(from->di_flags);
312 if (from->di_version == 3) {
313 to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
314 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
315 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
316 to->di_flags2 = cpu_to_be64(from->di_flags2);
317 to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
318 to->di_ino = cpu_to_be64(ip->i_ino);
319 to->di_lsn = cpu_to_be64(lsn);
320 memset(to->di_pad2, 0, sizeof(to->di_pad2));
321 uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
322 to->di_flushiter = 0;
324 to->di_flushiter = cpu_to_be16(from->di_flushiter);
329 xfs_log_dinode_to_disk(
330 struct xfs_log_dinode *from,
331 struct xfs_dinode *to)
333 to->di_magic = cpu_to_be16(from->di_magic);
334 to->di_mode = cpu_to_be16(from->di_mode);
335 to->di_version = from->di_version;
336 to->di_format = from->di_format;
338 to->di_uid = cpu_to_be32(from->di_uid);
339 to->di_gid = cpu_to_be32(from->di_gid);
340 to->di_nlink = cpu_to_be32(from->di_nlink);
341 to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
342 to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
343 memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
345 to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
346 to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
347 to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
348 to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
349 to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
350 to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
352 to->di_size = cpu_to_be64(from->di_size);
353 to->di_nblocks = cpu_to_be64(from->di_nblocks);
354 to->di_extsize = cpu_to_be32(from->di_extsize);
355 to->di_nextents = cpu_to_be32(from->di_nextents);
356 to->di_anextents = cpu_to_be16(from->di_anextents);
357 to->di_forkoff = from->di_forkoff;
358 to->di_aformat = from->di_aformat;
359 to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
360 to->di_dmstate = cpu_to_be16(from->di_dmstate);
361 to->di_flags = cpu_to_be16(from->di_flags);
362 to->di_gen = cpu_to_be32(from->di_gen);
364 if (from->di_version == 3) {
365 to->di_changecount = cpu_to_be64(from->di_changecount);
366 to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
367 to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
368 to->di_flags2 = cpu_to_be64(from->di_flags2);
369 to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
370 to->di_ino = cpu_to_be64(from->di_ino);
371 to->di_lsn = cpu_to_be64(from->di_lsn);
372 memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
373 uuid_copy(&to->di_uuid, &from->di_uuid);
374 to->di_flushiter = 0;
376 to->di_flushiter = cpu_to_be16(from->di_flushiter);
380 static xfs_failaddr_t
381 xfs_dinode_verify_fork(
382 struct xfs_dinode *dip,
383 struct xfs_mount *mp,
386 uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
388 switch (XFS_DFORK_FORMAT(dip, whichfork)) {
389 case XFS_DINODE_FMT_LOCAL:
391 * no local regular files yet
393 if (whichfork == XFS_DATA_FORK) {
394 if (S_ISREG(be16_to_cpu(dip->di_mode)))
395 return __this_address;
396 if (be64_to_cpu(dip->di_size) >
397 XFS_DFORK_SIZE(dip, mp, whichfork))
398 return __this_address;
401 return __this_address;
403 case XFS_DINODE_FMT_EXTENTS:
404 if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
405 return __this_address;
407 case XFS_DINODE_FMT_BTREE:
408 if (whichfork == XFS_ATTR_FORK) {
409 if (di_nextents > MAXAEXTNUM)
410 return __this_address;
411 } else if (di_nextents > MAXEXTNUM) {
412 return __this_address;
416 return __this_address;
421 static xfs_failaddr_t
422 xfs_dinode_verify_forkoff(
423 struct xfs_dinode *dip,
424 struct xfs_mount *mp)
426 if (!XFS_DFORK_Q(dip))
429 switch (dip->di_format) {
430 case XFS_DINODE_FMT_DEV:
431 if (dip->di_forkoff != (roundup(sizeof(xfs_dev_t), 8) >> 3))
432 return __this_address;
434 case XFS_DINODE_FMT_LOCAL: /* fall through ... */
435 case XFS_DINODE_FMT_EXTENTS: /* fall through ... */
436 case XFS_DINODE_FMT_BTREE:
437 if (dip->di_forkoff >= (XFS_LITINO(mp, dip->di_version) >> 3))
438 return __this_address;
441 return __this_address;
448 struct xfs_mount *mp,
450 struct xfs_dinode *dip)
458 if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
459 return __this_address;
461 /* Verify v3 integrity information first */
462 if (dip->di_version >= 3) {
463 if (!xfs_sb_version_hascrc(&mp->m_sb))
464 return __this_address;
465 if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
467 return __this_address;
468 if (be64_to_cpu(dip->di_ino) != ino)
469 return __this_address;
470 if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
471 return __this_address;
474 /* don't allow invalid i_size */
475 di_size = be64_to_cpu(dip->di_size);
476 if (di_size & (1ULL << 63))
477 return __this_address;
479 mode = be16_to_cpu(dip->di_mode);
480 if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
481 return __this_address;
483 /* No zero-length symlinks/dirs. */
484 if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
485 return __this_address;
487 /* Fork checks carried over from xfs_iformat_fork */
489 be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
490 be64_to_cpu(dip->di_nblocks))
491 return __this_address;
493 if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
494 return __this_address;
496 flags = be16_to_cpu(dip->di_flags);
498 if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
499 return __this_address;
501 /* check for illegal values of forkoff */
502 fa = xfs_dinode_verify_forkoff(dip, mp);
506 /* Do we have appropriate data fork formats for the mode? */
507 switch (mode & S_IFMT) {
512 if (dip->di_format != XFS_DINODE_FMT_DEV)
513 return __this_address;
518 fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
523 /* Uninitialized inode ok. */
526 return __this_address;
529 if (XFS_DFORK_Q(dip)) {
530 fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
535 * If there is no fork offset, this may be a freshly-made inode
536 * in a new disk cluster, in which case di_aformat is zeroed.
537 * Otherwise, such an inode must be in EXTENTS format; this goes
538 * for freed inodes as well.
540 switch (dip->di_aformat) {
542 case XFS_DINODE_FMT_EXTENTS:
545 return __this_address;
547 if (dip->di_anextents)
548 return __this_address;
551 /* extent size hint validation */
552 fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
557 /* only version 3 or greater inodes are extensively verified here */
558 if (dip->di_version < 3)
561 flags2 = be64_to_cpu(dip->di_flags2);
563 /* don't allow reflink/cowextsize if we don't have reflink */
564 if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
565 !xfs_sb_version_hasreflink(&mp->m_sb))
566 return __this_address;
568 /* only regular files get reflink */
569 if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
570 return __this_address;
572 /* don't let reflink and realtime mix */
573 if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
574 return __this_address;
576 /* don't let reflink and dax mix */
577 if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
578 return __this_address;
580 /* COW extent size hint validation */
581 fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
582 mode, flags, flags2);
591 struct xfs_mount *mp,
592 struct xfs_dinode *dip)
596 if (dip->di_version < 3)
599 ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
600 crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
602 dip->di_crc = xfs_end_cksum(crc);
606 * Read the disk inode attributes into the in-core inode structure.
608 * For version 5 superblocks, if we are initialising a new inode and we are not
609 * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
610 * inode core with a random generation number. If we are keeping inodes around,
611 * we need to read the inode cluster to get the existing generation number off
612 * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
613 * format) then log recovery is dependent on the di_flushiter field being
614 * initialised from the current on-disk value and hence we must also read the
630 * Fill in the location information in the in-core inode.
632 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
636 /* shortcut IO on inode allocation if possible */
637 if ((iget_flags & XFS_IGET_CREATE) &&
638 xfs_sb_version_hascrc(&mp->m_sb) &&
639 !(mp->m_flags & XFS_MOUNT_IKEEP)) {
640 /* initialise the on-disk inode core */
641 memset(&ip->i_d, 0, sizeof(ip->i_d));
642 VFS_I(ip)->i_generation = prandom_u32();
643 ip->i_d.di_version = 3;
648 * Get pointers to the on-disk inode and the buffer containing it.
650 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
654 /* even unallocated inodes are verified */
655 fa = xfs_dinode_verify(mp, ip->i_ino, dip);
657 xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
659 error = -EFSCORRUPTED;
664 * If the on-disk inode is already linked to a directory
665 * entry, copy all of the inode into the in-core inode.
666 * xfs_iformat_fork() handles copying in the inode format
667 * specific information.
668 * Otherwise, just get the truly permanent information.
671 xfs_inode_from_disk(ip, dip);
672 error = xfs_iformat_fork(ip, dip);
675 xfs_alert(mp, "%s: xfs_iformat() returned error %d",
682 * Partial initialisation of the in-core inode. Just the bits
683 * that xfs_ialloc won't overwrite or relies on being correct.
685 ip->i_d.di_version = dip->di_version;
686 VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
687 ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
690 * Make sure to pull in the mode here as well in
691 * case the inode is released without being used.
692 * This ensures that xfs_inactive() will see that
693 * the inode is already free and not try to mess
694 * with the uninitialized part of it.
696 VFS_I(ip)->i_mode = 0;
699 ASSERT(ip->i_d.di_version >= 2);
700 ip->i_delayed_blks = 0;
703 * Mark the buffer containing the inode as something to keep
704 * around for a while. This helps to keep recently accessed
705 * meta-data in-core longer.
707 xfs_buf_set_ref(bp, XFS_INO_REF);
710 * Use xfs_trans_brelse() to release the buffer containing the on-disk
711 * inode, because it was acquired with xfs_trans_read_buf() in
712 * xfs_imap_to_bp() above. If tp is NULL, this is just a normal
713 * brelse(). If we're within a transaction, then xfs_trans_brelse()
714 * will only release the buffer if it is not dirty within the
715 * transaction. It will be OK to release the buffer in this case,
716 * because inodes on disk are never destroyed and we will be locking the
717 * new in-core inode before putting it in the cache where other
718 * processes can find it. Thus we don't have to worry about the inode
719 * being changed just because we released the buffer.
722 xfs_trans_brelse(tp, bp);
727 * Validate di_extsize hint.
729 * The rules are documented at xfs_ioctl_setattr_check_extsize().
730 * These functions must be kept in sync with each other.
733 xfs_inode_validate_extsize(
734 struct xfs_mount *mp,
742 uint32_t extsize_bytes;
743 uint32_t blocksize_bytes;
745 rt_flag = (flags & XFS_DIFLAG_REALTIME);
746 hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
747 inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
748 extsize_bytes = XFS_FSB_TO_B(mp, extsize);
751 blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
753 blocksize_bytes = mp->m_sb.sb_blocksize;
755 if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
756 return __this_address;
758 if (hint_flag && !S_ISREG(mode))
759 return __this_address;
761 if (inherit_flag && !S_ISDIR(mode))
762 return __this_address;
764 if ((hint_flag || inherit_flag) && extsize == 0)
765 return __this_address;
767 /* free inodes get flags set to zero but extsize remains */
768 if (mode && !(hint_flag || inherit_flag) && extsize != 0)
769 return __this_address;
771 if (extsize_bytes % blocksize_bytes)
772 return __this_address;
774 if (extsize > MAXEXTLEN)
775 return __this_address;
777 if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
778 return __this_address;
784 * Validate di_cowextsize hint.
786 * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
787 * These functions must be kept in sync with each other.
790 xfs_inode_validate_cowextsize(
791 struct xfs_mount *mp,
799 uint32_t cowextsize_bytes;
801 rt_flag = (flags & XFS_DIFLAG_REALTIME);
802 hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
803 cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
805 if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
806 return __this_address;
808 if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
809 return __this_address;
811 if (hint_flag && cowextsize == 0)
812 return __this_address;
814 /* free inodes get flags set to zero but cowextsize remains */
815 if (mode && !hint_flag && cowextsize != 0)
816 return __this_address;
818 if (hint_flag && rt_flag)
819 return __this_address;
821 if (cowextsize_bytes % mp->m_sb.sb_blocksize)
822 return __this_address;
824 if (cowextsize > MAXEXTLEN)
825 return __this_address;
827 if (cowextsize > mp->m_sb.sb_agblocks / 2)
828 return __this_address;