// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_icache.h"
#include "xfs_rmap.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
/*
 * Set us up to scrub inode btrees.
 * If we detect a discrepancy between the inobt and the inode,
 * try again after forcing logged inode cores out to disk.
 */
int
xchk_setup_ag_iallocbt(
        struct xfs_scrub        *sc,
        struct xfs_inode        *ip)
{
        return xchk_setup_ag_btree(sc, ip, sc->flags & XCHK_TRY_HARDER);
}
/* Inode btree scrubber. */

struct xchk_iallocbt {
        /* Number of inodes we see while scanning inobt. */
        unsigned long long      inodes;

        /* Expected next startino, for big block filesystems. */
        xfs_agino_t             next_startino;

        /* Expected end of the current inode cluster. */
        xfs_agino_t             next_cluster_ino;
};
/*
 * If we're checking the finobt, cross-reference with the inobt.
 * Otherwise we're checking the inobt; if there is a finobt, make sure
 * we have a record or not depending on freecount.
 */
static inline void
xchk_iallocbt_chunk_xref_other(
        struct xfs_scrub                *sc,
        struct xfs_inobt_rec_incore     *irec,
        xfs_agino_t                     agino)
{
        struct xfs_btree_cur            **pcur;
        bool                            has_irec;
        int                             error;

        if (sc->sm->sm_type == XFS_SCRUB_TYPE_FINOBT)
                pcur = &sc->sa.ino_cur;
        else
                pcur = &sc->sa.fino_cur;
        if (!(*pcur))
                return;
        error = xfs_ialloc_has_inode_record(*pcur, agino, agino, &has_irec);
        if (!xchk_should_check_xref(sc, &error, pcur))
                return;
        if (((irec->ir_freecount > 0 && !has_irec) ||
             (irec->ir_freecount == 0 && has_irec)))
                xchk_btree_xref_set_corrupt(sc, *pcur, 0);
}
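/*
 * Illustrative summary (added for clarity, not part of the original logic):
 * the check above enforces that a chunk with free inodes (ir_freecount > 0)
 * must appear in the finobt, and that a fully allocated chunk
 * (ir_freecount == 0) must not.  A mismatch in either direction marks the
 * cross-referenced btree corrupt.
 */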
/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
        struct xfs_scrub                *sc,
        struct xfs_inobt_rec_incore     *irec,
        xfs_agino_t                     agino,
        xfs_agblock_t                   agbno,
        xfs_extlen_t                    len)
{
        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                return;

        xchk_xref_is_used_space(sc, agbno, len);
        xchk_iallocbt_chunk_xref_other(sc, irec, agino);
        xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
        xchk_xref_is_not_shared(sc, agbno, len);
}
/* Is this chunk worth checking? */
STATIC bool
xchk_iallocbt_chunk(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec,
        xfs_agino_t                     agino,
        xfs_extlen_t                    len)
{
        struct xfs_mount                *mp = bs->cur->bc_mp;
        xfs_agnumber_t                  agno = bs->cur->bc_private.a.agno;
        xfs_agblock_t                   bno;

        /* The chunk must not wrap, and both ends must sit inside the AG. */
        bno = XFS_AGINO_TO_AGBNO(mp, agino);
        if (bno + len <= bno ||
            !xfs_verify_agbno(mp, agno, bno) ||
            !xfs_verify_agbno(mp, agno, bno + len - 1))
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

        return true;
}
/* Count the number of free inodes. */
static unsigned int
xchk_iallocbt_freecount(
        xfs_inofree_t                   freemask)
{
        BUILD_BUG_ON(sizeof(freemask) != sizeof(__u64));
        return hweight64(freemask);
}
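/*
 * Illustrative example (added for clarity, not part of the original logic):
 * each set bit in ir_free marks a free inode within the 64-inode chunk, so
 * a freemask of 0xff means the first eight inodes of the chunk are free and
 * xchk_iallocbt_freecount() returns 8.
 */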
/*
 * Check that an inode's allocation status matches ir_free in the inobt
 * record.  First we try querying the in-core inode state, and if the inode
 * isn't loaded we examine the on-disk inode directly.
 *
 * Since there can be 1:M and M:1 mappings between inobt records and inode
 * clusters, we pass in the inode location information as an inobt record;
 * the index of an inode cluster within the inobt record (as well as the
 * cluster buffer itself); and the index of the inode within the cluster.
 *
 * @irec is the inobt record.
 * @irec_ino is the inode offset from the start of the record.
 * @dip is the on-disk inode.
 */
STATIC int
xchk_iallocbt_check_cluster_ifree(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec,
        unsigned int                    irec_ino,
        struct xfs_dinode               *dip)
{
        struct xfs_mount                *mp = bs->cur->bc_mp;
        xfs_ino_t                       fsino;
        xfs_agino_t                     agino;
        bool                            irec_free;
        bool                            ino_inuse;
        bool                            freemask_ok;
        int                             error = 0;

        if (xchk_should_terminate(bs->sc, &error))
                return error;

        /*
         * Given an inobt record and the offset of an inode from the start of
         * the record, compute which fs inode we're talking about.
         */
        agino = irec->ir_startino + irec_ino;
        fsino = XFS_AGINO_TO_INO(mp, bs->cur->bc_private.a.agno, agino);
        irec_free = (irec->ir_free & XFS_INOBT_MASK(irec_ino));

        if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC ||
            (dip->di_version >= 3 && be64_to_cpu(dip->di_ino) != fsino)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                goto out;
        }

        error = xfs_icache_inode_is_allocated(mp, bs->cur->bc_tp, fsino,
                        &ino_inuse);
        if (error == -ENODATA) {
                /* Not cached, just read the disk buffer */
                freemask_ok = irec_free ^ !!(dip->di_mode);
                if (!(bs->sc->flags & XCHK_TRY_HARDER) && !freemask_ok)
                        return -EDEADLOCK;
        } else if (error < 0) {
                /*
                 * Inode is only half assembled, or there was an IO error,
                 * or the verifier failed, so don't bother trying to check.
                 * The inode scrubber can deal with this.
                 */
                goto out;
        } else {
                /* Inode is all there. */
                freemask_ok = irec_free ^ ino_inuse;
        }
        if (!freemask_ok)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
out:
        return 0;
}
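/*
 * Illustrative note (added for clarity, not part of the original logic):
 * the freemask check above XORs "the record says this inode is free" with
 * "the inode is in use", which is true in exactly the two consistent
 * cases: free and not in use, or allocated and in use.  For an uncached
 * inode we substitute !!(dip->di_mode) for the in-use state, since a freed
 * on-disk inode has di_mode == 0.
 */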
/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec,
        unsigned int                    cluster_base)
{
        struct xfs_imap                 imap;
        struct xfs_mount                *mp = bs->cur->bc_mp;
        struct xfs_dinode               *dip;
        struct xfs_buf                  *cluster_bp;
        unsigned int                    nr_inodes;
        xfs_agnumber_t                  agno = bs->cur->bc_private.a.agno;
        xfs_agblock_t                   agbno;
        unsigned int                    cluster_index;
        uint16_t                        cluster_mask = 0;
        uint16_t                        ir_holemask;
        int                             error = 0;

        nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
                        mp->m_inodes_per_cluster);

        /* Map this inode cluster */
        agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

        /* Compute a bitmask for this cluster that can be used for holemask. */
        for (cluster_index = 0;
             cluster_index < nr_inodes;
             cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
                cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
                                XFS_INODES_PER_HOLEMASK_BIT);
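
        /*
         * Illustrative example (added for clarity, not part of the original
         * logic): with 64 inodes per chunk and 16 holemask bits (4 inodes
         * per bit), a cluster of 32 inodes at cluster_base == 32 makes the
         * loop above set holemask bits 8-15, i.e. cluster_mask == 0xff00.
         */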
        /*
         * Map the first inode of this cluster to a buffer and offset.
         * Be careful about inobt records that don't align with the start of
         * the inode buffer when block sizes are large enough to hold multiple
         * inode chunks.  When this happens, cluster_base will be zero but
         * ir_startino can be large enough to make im_boffset nonzero.
         */
        ir_holemask = (irec->ir_holemask & cluster_mask);
        imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
        imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
        imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino);

        if (imap.im_boffset != 0 && cluster_base != 0) {
                ASSERT(imap.im_boffset == 0 || cluster_base == 0);
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return 0;
        }
        trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
                        imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
                        cluster_mask, ir_holemask,
                        XFS_INO_TO_OFFSET(mp, irec->ir_startino +
                                          cluster_base));

        /* The whole cluster must be a hole or not a hole. */
        if (ir_holemask != cluster_mask && ir_holemask != 0) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return 0;
        }

        /* If any part of this is a hole, skip it. */
        if (ir_holemask) {
                xchk_xref_is_not_owned_by(bs->sc, agbno,
                                mp->m_blocks_per_cluster,
                                &XFS_RMAP_OINFO_INODES);
                return 0;
        }

        xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
                        &XFS_RMAP_OINFO_INODES);
        /* Grab the inode cluster buffer. */
        error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp,
                        0, 0);
        if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
                return error;

        /* Check free status of each inode within this cluster. */
        for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
                struct xfs_dinode       *dip;

                if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                        break;
                }

                dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
                error = xchk_iallocbt_check_cluster_ifree(bs, irec,
                                cluster_base + cluster_index, dip);
                if (error)
                        break;
                imap.im_boffset += mp->m_sb.sb_inodesize;
        }

        xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
        return error;
}
/*
 * For all the inode clusters that could map to this inobt record, make sure
 * that the holemask makes sense and that the allocation status of each inode
 * matches the freemask.
 */
STATIC int
xchk_iallocbt_check_clusters(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec)
{
        unsigned int                    cluster_base;
        int                             error = 0;

        /*
         * For the common case where this inobt record maps to multiple inode
         * clusters this will call _check_cluster for each cluster.
         *
         * For the case that multiple inobt records map to a single cluster,
         * this will call _check_cluster once.
         */
        for (cluster_base = 0;
             cluster_base < XFS_INODES_PER_CHUNK;
             cluster_base += bs->sc->mp->m_inodes_per_cluster) {
                error = xchk_iallocbt_check_cluster(bs, irec, cluster_base);
                if (error)
                        break;
        }

        return error;
}
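/*
 * Illustrative example (added for clarity, not part of the original logic):
 * with 32 inodes per cluster, the loop above calls
 * xchk_iallocbt_check_cluster() twice per record, at cluster_base 0 and 32;
 * with 128 inodes per cluster it runs only once, which covers the case of
 * multiple inobt records sharing one cluster.
 */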
/*
 * Make sure this inode btree record is aligned properly.  Because a fs block
 * contains multiple inodes, we check that the inobt record is aligned to the
 * correct inode, not just the correct block on disk.  This results in a finer
 * grained corruption check.
 */
STATIC void
xchk_iallocbt_rec_alignment(
        struct xchk_btree               *bs,
        struct xfs_inobt_rec_incore     *irec)
{
        struct xfs_mount                *mp = bs->sc->mp;
        struct xchk_iallocbt            *iabt = bs->private;

        /*
         * finobt records have different positioning requirements than inobt
         * records: each finobt record must have a corresponding inobt record.
         * That is checked in the xref function, so for now we only catch the
         * obvious case where the record isn't at all aligned properly.
         *
         * Note that if a fs block contains more than a single chunk of inodes,
         * we will have finobt records only for those chunks containing free
         * inodes, and therefore expect chunk alignment of finobt records.
         * Otherwise, we expect that the finobt record is aligned to the
         * cluster alignment as told by the superblock.
         */
        if (bs->cur->bc_btnum == XFS_BTNUM_FINO) {
                unsigned int    imask;

                imask = min_t(unsigned int, XFS_INODES_PER_CHUNK,
                                mp->m_cluster_align_inodes) - 1;
                if (irec->ir_startino & imask)
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return;
        }

        if (iabt->next_startino != NULLAGINO) {
                /*
                 * We're midway through a cluster of inodes that is mapped by
                 * multiple inobt records.  Did we get the record for the next
                 * irec in the sequence?
                 */
                if (irec->ir_startino != iabt->next_startino) {
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                        return;
                }

                iabt->next_startino += XFS_INODES_PER_CHUNK;

                /* Are we done with the cluster? */
                if (iabt->next_startino >= iabt->next_cluster_ino) {
                        iabt->next_startino = NULLAGINO;
                        iabt->next_cluster_ino = NULLAGINO;
                }
                return;
        }

        /* inobt records must be aligned to cluster and inode alignment size. */
        if (irec->ir_startino & (mp->m_cluster_align_inodes - 1)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return;
        }

        if (irec->ir_startino & (mp->m_inodes_per_cluster - 1)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                return;
        }

        if (mp->m_inodes_per_cluster <= XFS_INODES_PER_CHUNK)
                return;

        /*
         * If this is the start of an inode cluster that can be mapped by
         * multiple inobt records, the next inobt record must follow exactly
         * after this one.
         */
        iabt->next_startino = irec->ir_startino + XFS_INODES_PER_CHUNK;
        iabt->next_cluster_ino = irec->ir_startino + mp->m_inodes_per_cluster;
}
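/*
 * Illustrative example (added for clarity, not part of the original logic):
 * with 128 inodes per cluster, a cluster starting at agino 256 must be
 * mapped by exactly two inobt records.  Seeing startino 256 sets
 * next_startino to 320 and next_cluster_ino to 384; the following record
 * must then start at 320, after which the tracker advances past the
 * cluster and resets both fields to NULLAGINO.
 */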
/* Scrub an inobt/finobt record. */
STATIC int
xchk_iallocbt_rec(
        struct xchk_btree               *bs,
        union xfs_btree_rec             *rec)
{
        struct xfs_mount                *mp = bs->cur->bc_mp;
        struct xchk_iallocbt            *iabt = bs->private;
        struct xfs_inobt_rec_incore     irec;
        uint64_t                        holes;
        xfs_agnumber_t                  agno = bs->cur->bc_private.a.agno;
        xfs_agino_t                     agino;
        xfs_extlen_t                    len;
        int                             holecount;
        int                             i;
        int                             error = 0;
        unsigned int                    real_freecount;
        uint16_t                        holemask;

        xfs_inobt_btrec_to_irec(mp, rec, &irec);

        if (irec.ir_count > XFS_INODES_PER_CHUNK ||
            irec.ir_freecount > XFS_INODES_PER_CHUNK)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        real_freecount = irec.ir_freecount +
                        (XFS_INODES_PER_CHUNK - irec.ir_count);
        if (real_freecount != xchk_iallocbt_freecount(irec.ir_free))
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
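
        /*
         * Illustrative example (added for clarity, not part of the original
         * logic): a sparse record with ir_count == 48 has 16 inodes that
         * were never allocated, so with ir_freecount == 20 the real free
         * count checked above is 20 + (64 - 48) == 36, which must match the
         * number of set bits in ir_free.
         */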
        agino = irec.ir_startino;
        /* Record has to be properly aligned within the AG. */
        if (!xfs_verify_agino(mp, agno, agino) ||
            !xfs_verify_agino(mp, agno, agino + XFS_INODES_PER_CHUNK - 1)) {
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
                goto out;
        }

        xchk_iallocbt_rec_alignment(bs, &irec);
        if (bs->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                goto out;

        iabt->inodes += irec.ir_count;

        /* Handle non-sparse inodes */
        if (!xfs_inobt_issparse(irec.ir_holemask)) {
                len = XFS_B_TO_FSB(mp,
                                XFS_INODES_PER_CHUNK * mp->m_sb.sb_inodesize);
                if (irec.ir_count != XFS_INODES_PER_CHUNK)
                        xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

                if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
                        goto out;
                goto check_clusters;
        }
        /* Check each chunk of a sparse inode cluster. */
        holemask = irec.ir_holemask;
        holecount = 0;
        len = XFS_B_TO_FSB(mp,
                        XFS_INODES_PER_HOLEMASK_BIT * mp->m_sb.sb_inodesize);
        holes = ~xfs_inobt_irec_to_allocmask(&irec);
        if ((holes & irec.ir_free) != holes ||
            irec.ir_freecount > irec.ir_count)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

        /* Walk the holemask a bit at a time, checking each non-hole chunk. */
        for (i = 0; i < XFS_INOBT_HOLEMASK_BITS; i++) {
                if (holemask & 1)
                        holecount += XFS_INODES_PER_HOLEMASK_BIT;
                else if (!xchk_iallocbt_chunk(bs, &irec, agino, len))
                        break;
                holemask >>= 1;
                agino += XFS_INODES_PER_HOLEMASK_BIT;
        }

        if (holecount > XFS_INODES_PER_CHUNK ||
            holecount + irec.ir_count != XFS_INODES_PER_CHUNK)
                xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
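
        /*
         * Illustrative example (added for clarity, not part of the original
         * logic): if the low four holemask bits are set, 16 of the chunk's
         * 64 inodes are holes, so holecount == 16 and the record is
         * consistent only if ir_count == 48.
         */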
check_clusters:
        error = xchk_iallocbt_check_clusters(bs, &irec);
        if (error)
                goto out;

out:
        return error;
}
/*
 * Make sure the inode btrees are as large as the rmap thinks they are.
 * Don't bother if we're missing btree cursors, as we're already corrupt.
 */
STATIC void
xchk_iallocbt_xref_rmap_btreeblks(
        struct xfs_scrub        *sc,
        int                     which)
{
        xfs_filblks_t           blocks;
        xfs_extlen_t            inobt_blocks = 0;
        xfs_extlen_t            finobt_blocks = 0;
        int                     error;

        if (!sc->sa.ino_cur || !sc->sa.rmap_cur ||
            (xfs_sb_version_hasfinobt(&sc->mp->m_sb) && !sc->sa.fino_cur) ||
            xchk_skip_xref(sc->sm))
                return;

        /* Check that we saw as many inobt blocks as the rmap says. */
        error = xfs_btree_count_blocks(sc->sa.ino_cur, &inobt_blocks);
        if (!xchk_process_error(sc, 0, 0, &error))
                return;

        if (sc->sa.fino_cur) {
                error = xfs_btree_count_blocks(sc->sa.fino_cur, &finobt_blocks);
                if (!xchk_process_error(sc, 0, 0, &error))
                        return;
        }

        error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
                        &XFS_RMAP_OINFO_INOBT, &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
                return;
        if (blocks != inobt_blocks + finobt_blocks)
                xchk_btree_set_corrupt(sc, sc->sa.ino_cur, 0);
}
/*
 * Make sure that the inobt records point to the same number of blocks as
 * the rmap says are owned by inodes.
 */
STATIC void
xchk_iallocbt_xref_rmap_inodes(
        struct xfs_scrub        *sc,
        int                     which,
        unsigned long long      inodes)
{
        xfs_filblks_t           blocks;
        xfs_filblks_t           inode_blocks;
        int                     error;

        if (!sc->sa.rmap_cur || xchk_skip_xref(sc->sm))
                return;

        /* Check that we saw as many inode blocks as the rmap knows about. */
        error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
                        &XFS_RMAP_OINFO_INODES, &blocks);
        if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
                return;
        inode_blocks = XFS_B_TO_FSB(sc->mp, inodes * sc->mp->m_sb.sb_inodesize);
        if (blocks != inode_blocks)
                xchk_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
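/*
 * Illustrative example (added for clarity, not part of the original logic):
 * if the inobt scan counted 6400 inodes of 512 bytes each on a filesystem
 * with 4096-byte blocks, inode_blocks is XFS_B_TO_FSB(mp, 6400 * 512) == 800,
 * and the rmap must attribute exactly 800 blocks to XFS_RMAP_OINFO_INODES.
 */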
/* Scrub the inode btrees for some AG. */
STATIC int
xchk_iallocbt(
        struct xfs_scrub        *sc,
        xfs_btnum_t             which)
{
        struct xfs_btree_cur    *cur;
        struct xchk_iallocbt    iabt = {
                .inodes         = 0,
                .next_startino  = NULLAGINO,
                .next_cluster_ino = NULLAGINO,
        };
        int                     error;

        cur = which == XFS_BTNUM_INO ? sc->sa.ino_cur : sc->sa.fino_cur;
        error = xchk_btree(sc, cur, xchk_iallocbt_rec, &XFS_RMAP_OINFO_INOBT,
                        &iabt);
        if (error)
                return error;

        xchk_iallocbt_xref_rmap_btreeblks(sc, which);

        /*
         * If we're scrubbing the inode btree, inode_blocks is the number of
         * blocks pointed to by all the inode chunk records.  Therefore, we
         * should compare to the number of inode chunk blocks that the rmap
         * knows about.  We can't do this for the finobt since it only points
         * to inode chunks with free inodes.
         */
        if (which == XFS_BTNUM_INO)
                xchk_iallocbt_xref_rmap_inodes(sc, which, iabt.inodes);

        return error;
}
int
xchk_inobt(
        struct xfs_scrub        *sc)
{
        return xchk_iallocbt(sc, XFS_BTNUM_INO);
}

int
xchk_finobt(
        struct xfs_scrub        *sc)
{
        return xchk_iallocbt(sc, XFS_BTNUM_FINO);
}
/* See if an inode btree has (or doesn't have) an inode chunk record. */
static inline void
xchk_xref_inode_check(
        struct xfs_scrub        *sc,
        xfs_agblock_t           agbno,
        xfs_extlen_t            len,
        struct xfs_btree_cur    **icur,
        bool                    should_have_inodes)
{
        bool                    has_inodes;
        int                     error;

        if (!(*icur) || xchk_skip_xref(sc->sm))
                return;

        error = xfs_ialloc_has_inodes_at_extent(*icur, agbno, len, &has_inodes);
        if (!xchk_should_check_xref(sc, &error, icur))
                return;
        if (has_inodes != should_have_inodes)
                xchk_btree_xref_set_corrupt(sc, *icur, 0);
}
/* xref check that the extent is not covered by inodes */
void
xchk_xref_is_not_inode_chunk(
        struct xfs_scrub        *sc,
        xfs_agblock_t           agbno,
        xfs_extlen_t            len)
{
        xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, false);
        xchk_xref_inode_check(sc, agbno, len, &sc->sa.fino_cur, false);
}

/* xref check that the extent is covered by inodes */
void
xchk_xref_is_inode_chunk(
        struct xfs_scrub        *sc,
        xfs_agblock_t           agbno,
        xfs_extlen_t            len)
{
        xchk_xref_inode_check(sc, agbno, len, &sc->sa.ino_cur, true);
}