1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * Copyright (c) 2013 Red Hat, Inc.
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
14 #include "xfs_mount.h"
16 #include "xfs_dir2_priv.h"
17 #include "xfs_inode.h"
18 #include "xfs_trans.h"
20 #include "xfs_attr_leaf.h"
21 #include "xfs_error.h"
22 #include "xfs_trace.h"
23 #include "xfs_buf_item.h"
29 * Routines to implement directories and attributes as Btrees of hashed names.
32 /*========================================================================
33 * Function prototypes for the kernel.
34 *========================================================================*/
37 * Routines used for growing the Btree.
39 STATIC int xfs_da3_root_split(xfs_da_state_t *state,
40 xfs_da_state_blk_t *existing_root,
41 xfs_da_state_blk_t *new_child);
42 STATIC int xfs_da3_node_split(xfs_da_state_t *state,
43 xfs_da_state_blk_t *existing_blk,
44 xfs_da_state_blk_t *split_blk,
45 xfs_da_state_blk_t *blk_to_add,
48 STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
49 xfs_da_state_blk_t *node_blk_1,
50 xfs_da_state_blk_t *node_blk_2);
51 STATIC void xfs_da3_node_add(xfs_da_state_t *state,
52 xfs_da_state_blk_t *old_node_blk,
53 xfs_da_state_blk_t *new_node_blk);
56 * Routines used for shrinking the Btree.
58 STATIC int xfs_da3_root_join(xfs_da_state_t *state,
59 xfs_da_state_blk_t *root_blk);
60 STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
61 STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
62 xfs_da_state_blk_t *drop_blk);
63 STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
64 xfs_da_state_blk_t *src_node_blk,
65 xfs_da_state_blk_t *dst_node_blk);
70 STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
71 xfs_da_state_blk_t *drop_blk,
72 xfs_da_state_blk_t *save_blk);
75 kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
78 * Allocate a dir-state structure.
79 * We don't put them on the stack since they're large.
82 xfs_da_state_alloc(void)
84 return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
88 * Kill the altpath contents of a da-state structure.
91 xfs_da_state_kill_altpath(xfs_da_state_t *state)
95 for (i = 0; i < state->altpath.active; i++)
96 state->altpath.blk[i].bp = NULL;
97 state->altpath.active = 0;
101 * Free a da-state structure.
104 xfs_da_state_free(xfs_da_state_t *state)
106 xfs_da_state_kill_altpath(state);
108 memset((char *)state, 0, sizeof(*state));
110 kmem_zone_free(xfs_da_state_zone, state);
114 * Verify an xfs_da3_blkinfo structure. Note that the da3 fields are only
115 * accessible on v5 filesystems. This header format is common across da node,
116 * attr leaf and dir leaf blocks.
119 xfs_da3_blkinfo_verify(
121 struct xfs_da3_blkinfo *hdr3)
123 struct xfs_mount *mp = bp->b_mount;
124 struct xfs_da_blkinfo *hdr = &hdr3->hdr;
126 if (!xfs_verify_magic16(bp, hdr->magic))
127 return __this_address;
129 if (xfs_sb_version_hascrc(&mp->m_sb)) {
130 if (!uuid_equal(&hdr3->uuid, &mp->m_sb.sb_meta_uuid))
131 return __this_address;
132 if (be64_to_cpu(hdr3->blkno) != bp->b_bn)
133 return __this_address;
134 if (!xfs_log_check_lsn(mp, be64_to_cpu(hdr3->lsn)))
135 return __this_address;
141 static xfs_failaddr_t
145 struct xfs_mount *mp = bp->b_mount;
146 struct xfs_da_intnode *hdr = bp->b_addr;
147 struct xfs_da3_icnode_hdr ichdr;
148 const struct xfs_dir_ops *ops;
151 ops = xfs_dir_get_ops(mp, NULL);
153 ops->node_hdr_from_disk(&ichdr, hdr);
155 fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
159 if (ichdr.level == 0)
160 return __this_address;
161 if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
162 return __this_address;
163 if (ichdr.count == 0)
164 return __this_address;
167 * we don't know if the node is for an attribute or directory tree,
168 * so only fail if the count is outside both bounds
170 if (ichdr.count > mp->m_dir_geo->node_ents &&
171 ichdr.count > mp->m_attr_geo->node_ents)
172 return __this_address;
174 /* XXX: hash order check? */
180 xfs_da3_node_write_verify(
183 struct xfs_mount *mp = bp->b_mount;
184 struct xfs_buf_log_item *bip = bp->b_log_item;
185 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
188 fa = xfs_da3_node_verify(bp);
190 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
194 if (!xfs_sb_version_hascrc(&mp->m_sb))
198 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
200 xfs_buf_update_cksum(bp, XFS_DA3_NODE_CRC_OFF);
204 * leaf/node format detection on trees is sketchy, so a node read can be done on
205 * leaf level blocks when detection identifies the tree as a node format tree
206 * incorrectly. In this case, we need to swap the verifier to match the correct
207 * format of the block being read.
210 xfs_da3_node_read_verify(
213 struct xfs_da_blkinfo *info = bp->b_addr;
216 switch (be16_to_cpu(info->magic)) {
217 case XFS_DA3_NODE_MAGIC:
218 if (!xfs_buf_verify_cksum(bp, XFS_DA3_NODE_CRC_OFF)) {
219 xfs_verifier_error(bp, -EFSBADCRC,
224 case XFS_DA_NODE_MAGIC:
225 fa = xfs_da3_node_verify(bp);
227 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
229 case XFS_ATTR_LEAF_MAGIC:
230 case XFS_ATTR3_LEAF_MAGIC:
231 bp->b_ops = &xfs_attr3_leaf_buf_ops;
232 bp->b_ops->verify_read(bp);
234 case XFS_DIR2_LEAFN_MAGIC:
235 case XFS_DIR3_LEAFN_MAGIC:
236 bp->b_ops = &xfs_dir3_leafn_buf_ops;
237 bp->b_ops->verify_read(bp);
240 xfs_verifier_error(bp, -EFSCORRUPTED, __this_address);
245 /* Verify the structure of a da3 block. */
246 static xfs_failaddr_t
247 xfs_da3_node_verify_struct(
250 struct xfs_da_blkinfo *info = bp->b_addr;
252 switch (be16_to_cpu(info->magic)) {
253 case XFS_DA3_NODE_MAGIC:
254 case XFS_DA_NODE_MAGIC:
255 return xfs_da3_node_verify(bp);
256 case XFS_ATTR_LEAF_MAGIC:
257 case XFS_ATTR3_LEAF_MAGIC:
258 bp->b_ops = &xfs_attr3_leaf_buf_ops;
259 return bp->b_ops->verify_struct(bp);
260 case XFS_DIR2_LEAFN_MAGIC:
261 case XFS_DIR3_LEAFN_MAGIC:
262 bp->b_ops = &xfs_dir3_leafn_buf_ops;
263 return bp->b_ops->verify_struct(bp);
265 return __this_address;
269 const struct xfs_buf_ops xfs_da3_node_buf_ops = {
270 .name = "xfs_da3_node",
271 .magic16 = { cpu_to_be16(XFS_DA_NODE_MAGIC),
272 cpu_to_be16(XFS_DA3_NODE_MAGIC) },
273 .verify_read = xfs_da3_node_read_verify,
274 .verify_write = xfs_da3_node_write_verify,
275 .verify_struct = xfs_da3_node_verify_struct,
280 struct xfs_trans *tp,
281 struct xfs_inode *dp,
283 xfs_daddr_t mappedbno,
284 struct xfs_buf **bpp,
289 err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
290 which_fork, &xfs_da3_node_buf_ops);
291 if (!err && tp && *bpp) {
292 struct xfs_da_blkinfo *info = (*bpp)->b_addr;
295 switch (be16_to_cpu(info->magic)) {
296 case XFS_DA_NODE_MAGIC:
297 case XFS_DA3_NODE_MAGIC:
298 type = XFS_BLFT_DA_NODE_BUF;
300 case XFS_ATTR_LEAF_MAGIC:
301 case XFS_ATTR3_LEAF_MAGIC:
302 type = XFS_BLFT_ATTR_LEAF_BUF;
304 case XFS_DIR2_LEAFN_MAGIC:
305 case XFS_DIR3_LEAFN_MAGIC:
306 type = XFS_BLFT_DIR_LEAFN_BUF;
309 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
310 tp->t_mountp, info, sizeof(*info));
311 xfs_trans_brelse(tp, *bpp);
313 return -EFSCORRUPTED;
315 xfs_trans_buf_set_type(tp, *bpp, type);
320 /*========================================================================
321 * Routines used for growing the Btree.
322 *========================================================================*/
325 * Create the initial contents of an intermediate node.
329 struct xfs_da_args *args,
332 struct xfs_buf **bpp,
335 struct xfs_da_intnode *node;
336 struct xfs_trans *tp = args->trans;
337 struct xfs_mount *mp = tp->t_mountp;
338 struct xfs_da3_icnode_hdr ichdr = {0};
341 struct xfs_inode *dp = args->dp;
343 trace_xfs_da_node_create(args);
344 ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
346 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
349 bp->b_ops = &xfs_da3_node_buf_ops;
350 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
353 if (xfs_sb_version_hascrc(&mp->m_sb)) {
354 struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
356 memset(hdr3, 0, sizeof(struct xfs_da3_node_hdr));
357 ichdr.magic = XFS_DA3_NODE_MAGIC;
358 hdr3->info.blkno = cpu_to_be64(bp->b_bn);
359 hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
360 uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_meta_uuid);
362 ichdr.magic = XFS_DA_NODE_MAGIC;
366 dp->d_ops->node_hdr_to_disk(node, &ichdr);
367 xfs_trans_log_buf(tp, bp,
368 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
375 * Split a leaf node, rebalance, then possibly split
376 * intermediate nodes, rebalance, etc.
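/*
 * Illustrative summary, not part of the original source: when a full leaf
 * is split into two, an entry for the new sibling (its highest hashval and
 * block number) must be added to the parent node; if that parent is itself
 * full it is split in turn, and the process can propagate upward until
 * xfs_da3_root_split() grows the tree by one level.
 */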
380 struct xfs_da_state *state)
382 struct xfs_da_state_blk *oldblk;
383 struct xfs_da_state_blk *newblk;
384 struct xfs_da_state_blk *addblk;
385 struct xfs_da_intnode *node;
391 trace_xfs_da_split(state->args);
394 * Walk back up the tree splitting/inserting/adjusting as necessary.
395 * If we need to insert and there isn't room, split the node, then
396 * decide which fragment to insert the new block from below into.
397 * Note that we may split the root this way, but we need more fixup.
399 max = state->path.active - 1;
400 ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
401 ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
402 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
404 addblk = &state->path.blk[max]; /* initial dummy value */
405 for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
406 oldblk = &state->path.blk[i];
407 newblk = &state->altpath.blk[i];
410 * If a leaf node then
411 * Allocate a new leaf node, then rebalance across them.
412 * else if an intermediate node then
413 * We split on the last layer, must we split the node?
415 switch (oldblk->magic) {
416 case XFS_ATTR_LEAF_MAGIC:
417 error = xfs_attr3_leaf_split(state, oldblk, newblk);
418 if ((error != 0) && (error != -ENOSPC)) {
419 return error; /* GROT: attr is inconsistent */
426 * Entry wouldn't fit, split the leaf again. The new
427 * extrablk will be consumed by xfs_da3_node_split if
430 state->extravalid = 1;
432 state->extraafter = 0; /* before newblk */
433 trace_xfs_attr_leaf_split_before(state->args);
434 error = xfs_attr3_leaf_split(state, oldblk,
437 state->extraafter = 1; /* after newblk */
438 trace_xfs_attr_leaf_split_after(state->args);
439 error = xfs_attr3_leaf_split(state, newblk,
443 return error; /* GROT: attr inconsistent */
446 case XFS_DIR2_LEAFN_MAGIC:
447 error = xfs_dir2_leafn_split(state, oldblk, newblk);
452 case XFS_DA_NODE_MAGIC:
453 error = xfs_da3_node_split(state, oldblk, newblk, addblk,
457 return error; /* GROT: dir is inconsistent */
459 * Record the newly split block for the next time thru?
469 * Update the btree to show the new hashval for this child.
471 xfs_da3_fixhashpath(state, &state->path);
477 * xfs_da3_node_split() should have consumed any extra blocks we added
478 * during a double leaf split in the attr fork. This is guaranteed as
479 * we can't be here if the attr fork only has a single leaf block.
481 ASSERT(state->extravalid == 0 ||
482 state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
485 * Split the root node.
487 ASSERT(state->path.active == 0);
488 oldblk = &state->path.blk[0];
489 error = xfs_da3_root_split(state, oldblk, addblk);
494 * Update pointers to the node which used to be block 0 and just got
495 * bumped because of the addition of a new root node. Note that the
496 * original block 0 could be at any position in the list of blocks in
499 * Note: the magic numbers and sibling pointers are in the same physical
500 * place for both v2 and v3 headers (by design). Hence it doesn't matter
501 * which version of the xfs_da_intnode structure we use here as the
502 * result will be the same using either structure.
504 node = oldblk->bp->b_addr;
505 if (node->hdr.info.forw) {
506 if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
507 error = -EFSCORRUPTED;
510 node = addblk->bp->b_addr;
511 node->hdr.info.back = cpu_to_be32(oldblk->blkno);
512 xfs_trans_log_buf(state->args->trans, addblk->bp,
513 XFS_DA_LOGRANGE(node, &node->hdr.info,
514 sizeof(node->hdr.info)));
516 node = oldblk->bp->b_addr;
517 if (node->hdr.info.back) {
518 if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
519 error = -EFSCORRUPTED;
522 node = addblk->bp->b_addr;
523 node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
524 xfs_trans_log_buf(state->args->trans, addblk->bp,
525 XFS_DA_LOGRANGE(node, &node->hdr.info,
526 sizeof(node->hdr.info)));
534 * Split the root. We have to create a new root and point to the two
535 * parts (the split old root) that we just created. Copy block zero to
536 * the EOF, extending the inode in process.
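/*
 * Rough sketch of the transformation (illustration only):
 *
 *	before:	blk 0 = [ old root, just split ]
 *	after:	blk 0 = [ new root ] -> { [ copy of old root ], [ new sibling ] }
 *
 * The old root's contents are copied out to a newly allocated block and
 * block 0 is rebuilt one level taller with exactly two entries.
 */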
538 STATIC int /* error */
540 struct xfs_da_state *state,
541 struct xfs_da_state_blk *blk1,
542 struct xfs_da_state_blk *blk2)
544 struct xfs_da_intnode *node;
545 struct xfs_da_intnode *oldroot;
546 struct xfs_da_node_entry *btree;
547 struct xfs_da3_icnode_hdr nodehdr;
548 struct xfs_da_args *args;
550 struct xfs_inode *dp;
551 struct xfs_trans *tp;
552 struct xfs_dir2_leaf *leaf;
558 trace_xfs_da_root_split(state->args);
561 * Copy the existing (incorrect) block from the root node position
562 * to a free space somewhere.
565 error = xfs_da_grow_inode(args, &blkno);
571 error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
575 oldroot = blk1->bp->b_addr;
576 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
577 oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
578 struct xfs_da3_icnode_hdr icnodehdr;
580 dp->d_ops->node_hdr_from_disk(&icnodehdr, oldroot);
581 btree = dp->d_ops->node_tree_p(oldroot);
582 size = (int)((char *)&btree[icnodehdr.count] - (char *)oldroot);
583 level = icnodehdr.level;
586 * we are about to copy oldroot to bp, so set up the type
587 * of bp while we know exactly what it will be.
589 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
591 struct xfs_dir3_icleaf_hdr leafhdr;
592 struct xfs_dir2_leaf_entry *ents;
594 leaf = (xfs_dir2_leaf_t *)oldroot;
595 dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
596 ents = dp->d_ops->leaf_ents_p(leaf);
598 ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
599 leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
600 size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
604 * we are about to copy oldroot to bp, so set up the type
605 * of bp while we know exactly what it will be.
607 xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
611 * we can copy most of the information in the node from one block to
612 * another, but for CRC enabled headers we have to make sure that the
613 * block specific identifiers are kept intact. We update the buffer
616 memcpy(node, oldroot, size);
617 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
618 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
619 struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
621 node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
623 xfs_trans_log_buf(tp, bp, 0, size - 1);
625 bp->b_ops = blk1->bp->b_ops;
626 xfs_trans_buf_copy_type(bp, blk1->bp);
631 * Set up the new root node.
633 error = xfs_da3_node_create(args,
634 (args->whichfork == XFS_DATA_FORK) ? args->geo->leafblk : 0,
635 level + 1, &bp, args->whichfork);
640 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
641 btree = dp->d_ops->node_tree_p(node);
642 btree[0].hashval = cpu_to_be32(blk1->hashval);
643 btree[0].before = cpu_to_be32(blk1->blkno);
644 btree[1].hashval = cpu_to_be32(blk2->hashval);
645 btree[1].before = cpu_to_be32(blk2->blkno);
647 dp->d_ops->node_hdr_to_disk(node, &nodehdr);
650 if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
651 oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
652 ASSERT(blk1->blkno >= args->geo->leafblk &&
653 blk1->blkno < args->geo->freeblk);
654 ASSERT(blk2->blkno >= args->geo->leafblk &&
655 blk2->blkno < args->geo->freeblk);
659 /* Header is already logged by xfs_da_node_create */
660 xfs_trans_log_buf(tp, bp,
661 XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
667 * Split the node, rebalance, then add the new entry.
669 STATIC int /* error */
671 struct xfs_da_state *state,
672 struct xfs_da_state_blk *oldblk,
673 struct xfs_da_state_blk *newblk,
674 struct xfs_da_state_blk *addblk,
678 struct xfs_da_intnode *node;
679 struct xfs_da3_icnode_hdr nodehdr;
684 struct xfs_inode *dp = state->args->dp;
686 trace_xfs_da_node_split(state->args);
688 node = oldblk->bp->b_addr;
689 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
692 * With V2 dirs the extra block is data or freespace.
694 useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
695 newcount = 1 + useextra;
697 * Do we have to split the node?
699 if (nodehdr.count + newcount > state->args->geo->node_ents) {
701 * Allocate a new node, add to the doubly linked chain of
702 * nodes, then move some of our excess entries into it.
704 error = xfs_da_grow_inode(state->args, &blkno);
706 return error; /* GROT: dir is inconsistent */
708 error = xfs_da3_node_create(state->args, blkno, treelevel,
709 &newblk->bp, state->args->whichfork);
711 return error; /* GROT: dir is inconsistent */
712 newblk->blkno = blkno;
713 newblk->magic = XFS_DA_NODE_MAGIC;
714 xfs_da3_node_rebalance(state, oldblk, newblk);
715 error = xfs_da3_blk_link(state, oldblk, newblk);
724 * Insert the new entry(s) into the correct block
725 * (updating last hashval in the process).
727 * xfs_da3_node_add() inserts BEFORE the given index,
728 * and as a result of using node_lookup_int() we always
729 * point to a valid entry (not after one), but a split
730 * operation always results in a new block whose hashvals
731 * FOLLOW the current block.
733 * If we had double-split op below us, then add the extra block too.
735 node = oldblk->bp->b_addr;
736 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
737 if (oldblk->index <= nodehdr.count) {
739 xfs_da3_node_add(state, oldblk, addblk);
741 if (state->extraafter)
743 xfs_da3_node_add(state, oldblk, &state->extrablk);
744 state->extravalid = 0;
748 xfs_da3_node_add(state, newblk, addblk);
750 if (state->extraafter)
752 xfs_da3_node_add(state, newblk, &state->extrablk);
753 state->extravalid = 0;
761 * Balance the btree elements between two intermediate nodes,
762 * usually one full and one empty.
764 * NOTE: if blk2 is empty, then it will get the upper half of blk1.
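/*
 * Worked example with hypothetical numbers (illustration only): if node1
 * holds 64 entries and node2 is empty, the code below computes
 * count = (64 - 0) / 2 = 32 and copies the 32 highest-hashval entries of
 * node1 into node2, leaving 32 entries in each block.
 */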
767 xfs_da3_node_rebalance(
768 struct xfs_da_state *state,
769 struct xfs_da_state_blk *blk1,
770 struct xfs_da_state_blk *blk2)
772 struct xfs_da_intnode *node1;
773 struct xfs_da_intnode *node2;
774 struct xfs_da_intnode *tmpnode;
775 struct xfs_da_node_entry *btree1;
776 struct xfs_da_node_entry *btree2;
777 struct xfs_da_node_entry *btree_s;
778 struct xfs_da_node_entry *btree_d;
779 struct xfs_da3_icnode_hdr nodehdr1;
780 struct xfs_da3_icnode_hdr nodehdr2;
781 struct xfs_trans *tp;
785 struct xfs_inode *dp = state->args->dp;
787 trace_xfs_da_node_rebalance(state->args);
789 node1 = blk1->bp->b_addr;
790 node2 = blk2->bp->b_addr;
791 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
792 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
793 btree1 = dp->d_ops->node_tree_p(node1);
794 btree2 = dp->d_ops->node_tree_p(node2);
797 * Figure out how many entries need to move, and in which direction.
798 * Swap the nodes around if that makes it simpler.
800 if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
801 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
802 (be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
803 be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
807 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
808 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
809 btree1 = dp->d_ops->node_tree_p(node1);
810 btree2 = dp->d_ops->node_tree_p(node2);
814 count = (nodehdr1.count - nodehdr2.count) / 2;
817 tp = state->args->trans;
819 * Two cases: high-to-low and low-to-high.
823 * Move elements in node2 up to make a hole.
825 tmp = nodehdr2.count;
827 tmp *= (uint)sizeof(xfs_da_node_entry_t);
828 btree_s = &btree2[0];
829 btree_d = &btree2[count];
830 memmove(btree_d, btree_s, tmp);
834 * Move the req'd B-tree elements from high in node1 to
837 nodehdr2.count += count;
838 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
839 btree_s = &btree1[nodehdr1.count - count];
840 btree_d = &btree2[0];
841 memcpy(btree_d, btree_s, tmp);
842 nodehdr1.count -= count;
845 * Move the req'd B-tree elements from low in node2 to
849 tmp = count * (uint)sizeof(xfs_da_node_entry_t);
850 btree_s = &btree2[0];
851 btree_d = &btree1[nodehdr1.count];
852 memcpy(btree_d, btree_s, tmp);
853 nodehdr1.count += count;
855 xfs_trans_log_buf(tp, blk1->bp,
856 XFS_DA_LOGRANGE(node1, btree_d, tmp));
859 * Move elements in node2 down to fill the hole.
861 tmp = nodehdr2.count - count;
862 tmp *= (uint)sizeof(xfs_da_node_entry_t);
863 btree_s = &btree2[count];
864 btree_d = &btree2[0];
865 memmove(btree_d, btree_s, tmp);
866 nodehdr2.count -= count;
870 * Log header of node 1 and all current bits of node 2.
872 dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
873 xfs_trans_log_buf(tp, blk1->bp,
874 XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
876 dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
877 xfs_trans_log_buf(tp, blk2->bp,
878 XFS_DA_LOGRANGE(node2, &node2->hdr,
879 dp->d_ops->node_hdr_size +
880 (sizeof(btree2[0]) * nodehdr2.count)));
883 * Record the last hashval from each block for upward propagation.
884 * (note: don't use the swapped node pointers)
887 node1 = blk1->bp->b_addr;
888 node2 = blk2->bp->b_addr;
889 dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
890 dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
891 btree1 = dp->d_ops->node_tree_p(node1);
892 btree2 = dp->d_ops->node_tree_p(node2);
894 blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
895 blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
898 * Adjust the expected index for insertion.
900 if (blk1->index >= nodehdr1.count) {
901 blk2->index = blk1->index - nodehdr1.count;
902 blk1->index = nodehdr1.count + 1; /* make it invalid */
907 * Add a new entry to an intermediate node.
911 struct xfs_da_state *state,
912 struct xfs_da_state_blk *oldblk,
913 struct xfs_da_state_blk *newblk)
915 struct xfs_da_intnode *node;
916 struct xfs_da3_icnode_hdr nodehdr;
917 struct xfs_da_node_entry *btree;
919 struct xfs_inode *dp = state->args->dp;
921 trace_xfs_da_node_add(state->args);
923 node = oldblk->bp->b_addr;
924 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
925 btree = dp->d_ops->node_tree_p(node);
927 ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
928 ASSERT(newblk->blkno != 0);
929 if (state->args->whichfork == XFS_DATA_FORK)
930 ASSERT(newblk->blkno >= state->args->geo->leafblk &&
931 newblk->blkno < state->args->geo->freeblk);
934 * We may need to make some room before we insert the new node.
937 if (oldblk->index < nodehdr.count) {
938 tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
939 memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
941 btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
942 btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
943 xfs_trans_log_buf(state->args->trans, oldblk->bp,
944 XFS_DA_LOGRANGE(node, &btree[oldblk->index],
945 tmp + sizeof(*btree)));
948 dp->d_ops->node_hdr_to_disk(node, &nodehdr);
949 xfs_trans_log_buf(state->args->trans, oldblk->bp,
950 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
953 * Copy the last hash value from the oldblk to propagate upwards.
955 oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
958 /*========================================================================
959 * Routines used for shrinking the Btree.
960 *========================================================================*/
963 * Deallocate an empty leaf node, remove it from its parent,
964 * possibly deallocating that block, etc...
968 struct xfs_da_state *state)
970 struct xfs_da_state_blk *drop_blk;
971 struct xfs_da_state_blk *save_blk;
975 trace_xfs_da_join(state->args);
977 drop_blk = &state->path.blk[ state->path.active-1 ];
978 save_blk = &state->altpath.blk[ state->path.active-1 ];
979 ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
980 ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
981 drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
984 * Walk back up the tree joining/deallocating as necessary.
985 * When we stop dropping blocks, break out.
987 for ( ; state->path.active >= 2; drop_blk--, save_blk--,
988 state->path.active--) {
990 * See if we can combine the block with a neighbor.
991 * (action == 0) => no options, just leave
992 * (action == 1) => coalesce, then unlink
993 * (action == 2) => block empty, unlink it
995 switch (drop_blk->magic) {
996 case XFS_ATTR_LEAF_MAGIC:
997 error = xfs_attr3_leaf_toosmall(state, &action);
1002 xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
1004 case XFS_DIR2_LEAFN_MAGIC:
1005 error = xfs_dir2_leafn_toosmall(state, &action);
1010 xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
1012 case XFS_DA_NODE_MAGIC:
1014 * Remove the offending node, fixup hashvals,
1015 * check for a toosmall neighbor.
1017 xfs_da3_node_remove(state, drop_blk);
1018 xfs_da3_fixhashpath(state, &state->path);
1019 error = xfs_da3_node_toosmall(state, &action);
1024 xfs_da3_node_unbalance(state, drop_blk, save_blk);
1027 xfs_da3_fixhashpath(state, &state->altpath);
1028 error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
1029 xfs_da_state_kill_altpath(state);
1032 error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
1034 drop_blk->bp = NULL;
1039 * We joined all the way to the top. If it turns out that
1040 * we only have one entry in the root, make the child block
1043 xfs_da3_node_remove(state, drop_blk);
1044 xfs_da3_fixhashpath(state, &state->path);
1045 error = xfs_da3_root_join(state, &state->path.blk[0]);
1051 xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
1053 __be16 magic = blkinfo->magic;
1056 ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1057 magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1058 magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1059 magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1061 ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1062 magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
1064 ASSERT(!blkinfo->forw);
1065 ASSERT(!blkinfo->back);
1068 #define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
1072 * We have only one entry in the root. Copy the only remaining child of
1073 * the old root to block 0 as the new root node.
1077 struct xfs_da_state *state,
1078 struct xfs_da_state_blk *root_blk)
1080 struct xfs_da_intnode *oldroot;
1081 struct xfs_da_args *args;
1084 struct xfs_da3_icnode_hdr oldroothdr;
1085 struct xfs_da_node_entry *btree;
1087 struct xfs_inode *dp = state->args->dp;
1089 trace_xfs_da_root_join(state->args);
1091 ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
1094 oldroot = root_blk->bp->b_addr;
1095 dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
1096 ASSERT(oldroothdr.forw == 0);
1097 ASSERT(oldroothdr.back == 0);
1100 * If the root has more than one child, then don't do anything.
1102 if (oldroothdr.count > 1)
1106 * Read in the (only) child block, then copy those bytes into
1107 * the root block's buffer and free the original child block.
1109 btree = dp->d_ops->node_tree_p(oldroot);
1110 child = be32_to_cpu(btree[0].before);
1112 error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
1116 xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
1119 * This could be copying a leaf back into the root block in the case of
1120 * there only being a single leaf block left in the tree. Hence we have
1121 * to update the b_ops pointer as well to match the buffer type change
1122 * that could occur. For dir3 blocks we also need to update the block
1123 * number in the buffer header.
1125 memcpy(root_blk->bp->b_addr, bp->b_addr, args->geo->blksize);
1126 root_blk->bp->b_ops = bp->b_ops;
1127 xfs_trans_buf_copy_type(root_blk->bp, bp);
1128 if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
1129 struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
1130 da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
1132 xfs_trans_log_buf(args->trans, root_blk->bp, 0,
1133 args->geo->blksize - 1);
1134 error = xfs_da_shrink_inode(args, child, bp);
1139 * Check a node block and its neighbors to see if the block should be
1140 * collapsed into one or the other neighbor. Always keep the block
1141 * with the smaller block number.
1142 * If the current block is over 50% full, don't try to join it, return 0.
1143 * If the block is empty, fill in the state structure and return 2.
1144 * If it can be collapsed, fill in the state structure and return 1.
1145 * If nothing can be done, return 0.
1148 xfs_da3_node_toosmall(
1149 struct xfs_da_state *state,
1152 struct xfs_da_intnode *node;
1153 struct xfs_da_state_blk *blk;
1154 struct xfs_da_blkinfo *info;
1157 struct xfs_da3_icnode_hdr nodehdr;
1163 struct xfs_inode *dp = state->args->dp;
1165 trace_xfs_da_node_toosmall(state->args);
1168 * Check for the degenerate case of the block being over 50% full.
1169 * If so, it's not worth even looking to see if we might be able
1170 * to coalesce with a sibling.
1172 blk = &state->path.blk[ state->path.active-1 ];
1173 info = blk->bp->b_addr;
1174 node = (xfs_da_intnode_t *)info;
1175 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1176 if (nodehdr.count > (state->args->geo->node_ents >> 1)) {
1177 *action = 0; /* blk over 50%, don't try to join */
1178 return 0; /* blk over 50%, don't try to join */
1182 * Check for the degenerate case of the block being empty.
1183 * If the block is empty, we'll simply delete it, no need to
1184 * coalesce it with a sibling block. We choose (arbitrarily)
1185 * to merge with the forward block unless it is NULL.
1187 if (nodehdr.count == 0) {
1189 * Make altpath point to the block we want to keep and
1190 * path point to the block we want to drop (this one).
1192 forward = (info->forw != 0);
1193 memcpy(&state->altpath, &state->path, sizeof(state->path));
1194 error = xfs_da3_path_shift(state, &state->altpath, forward,
1207 * Examine each sibling block to see if we can coalesce with
1208 * at least 25% free space to spare. We need to figure out
1209 * whether to merge with the forward or the backward block.
1210 * We prefer coalescing with the lower numbered sibling so as
1211 * to shrink a directory over time.
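/*
 * Worked example with hypothetical numbers (illustration only): with
 * node_ents == 512 and 100 entries remaining in this block, the code
 * below computes count = 512 - (512 >> 2) - 100 = 284, so a sibling
 * qualifies for merging only if it holds at most 284 entries, i.e. the
 * combined block would still have at least 25% of its slots free.
 */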
1213 count = state->args->geo->node_ents;
1214 count -= state->args->geo->node_ents >> 2;
1215 count -= nodehdr.count;
1217 /* start with smaller blk num */
1218 forward = nodehdr.forw < nodehdr.back;
1219 for (i = 0; i < 2; forward = !forward, i++) {
1220 struct xfs_da3_icnode_hdr thdr;
1222 blkno = nodehdr.forw;
1224 blkno = nodehdr.back;
1227 error = xfs_da3_node_read(state->args->trans, dp,
1228 blkno, -1, &bp, state->args->whichfork);
1233 dp->d_ops->node_hdr_from_disk(&thdr, node);
1234 xfs_trans_brelse(state->args->trans, bp);
1236 if (count - thdr.count >= 0)
1237 break; /* fits with at least 25% to spare */
1245 * Make altpath point to the block we want to keep (the lower
1246 * numbered block) and path point to the block we want to drop.
1248 memcpy(&state->altpath, &state->path, sizeof(state->path));
1249 if (blkno < blk->blkno) {
1250 error = xfs_da3_path_shift(state, &state->altpath, forward,
1253 error = xfs_da3_path_shift(state, &state->path, forward,
1267 * Pick up the last hashvalue from an intermediate node.
1270 xfs_da3_node_lasthash(
1271 struct xfs_inode *dp,
1275 struct xfs_da_intnode *node;
1276 struct xfs_da_node_entry *btree;
1277 struct xfs_da3_icnode_hdr nodehdr;
1280 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1282 *count = nodehdr.count;
1285 btree = dp->d_ops->node_tree_p(node);
1286 return be32_to_cpu(btree[nodehdr.count - 1].hashval);
1290 * Walk back up the tree adjusting hash values as necessary;
1291 * when we stop making changes, return.
1294 xfs_da3_fixhashpath(
1295 struct xfs_da_state *state,
1296 struct xfs_da_state_path *path)
1298 struct xfs_da_state_blk *blk;
1299 struct xfs_da_intnode *node;
1300 struct xfs_da_node_entry *btree;
1301 xfs_dahash_t lasthash=0;
1304 struct xfs_inode *dp = state->args->dp;
1306 trace_xfs_da_fixhashpath(state->args);
1308 level = path->active-1;
1309 blk = &path->blk[ level ];
1310 switch (blk->magic) {
1311 case XFS_ATTR_LEAF_MAGIC:
1312 lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
1316 case XFS_DIR2_LEAFN_MAGIC:
1317 lasthash = xfs_dir2_leaf_lasthash(dp, blk->bp, &count);
1321 case XFS_DA_NODE_MAGIC:
1322 lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
1327 for (blk--, level--; level >= 0; blk--, level--) {
1328 struct xfs_da3_icnode_hdr nodehdr;
1330 node = blk->bp->b_addr;
1331 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1332 btree = dp->d_ops->node_tree_p(node);
1333 if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
1335 blk->hashval = lasthash;
1336 btree[blk->index].hashval = cpu_to_be32(lasthash);
1337 xfs_trans_log_buf(state->args->trans, blk->bp,
1338 XFS_DA_LOGRANGE(node, &btree[blk->index],
1341 lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1346 * Remove an entry from an intermediate node.
1349 xfs_da3_node_remove(
1350 struct xfs_da_state *state,
1351 struct xfs_da_state_blk *drop_blk)
1353 struct xfs_da_intnode *node;
1354 struct xfs_da3_icnode_hdr nodehdr;
1355 struct xfs_da_node_entry *btree;
1358 struct xfs_inode *dp = state->args->dp;
1360 trace_xfs_da_node_remove(state->args);
1362 node = drop_blk->bp->b_addr;
1363 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1364 ASSERT(drop_blk->index < nodehdr.count);
1365 ASSERT(drop_blk->index >= 0);
1368 * Copy over the offending entry, or just zero it out.
1370 index = drop_blk->index;
1371 btree = dp->d_ops->node_tree_p(node);
1372 if (index < nodehdr.count - 1) {
1373 tmp = nodehdr.count - index - 1;
1374 tmp *= (uint)sizeof(xfs_da_node_entry_t);
1375 memmove(&btree[index], &btree[index + 1], tmp);
1376 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1377 XFS_DA_LOGRANGE(node, &btree[index], tmp));
1378 index = nodehdr.count - 1;
1380 memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
1381 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1382 XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
1384 dp->d_ops->node_hdr_to_disk(node, &nodehdr);
1385 xfs_trans_log_buf(state->args->trans, drop_blk->bp,
1386 XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
1389 * Copy the last hash value from the block to propagate upwards.
1391 drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
1395 * Unbalance the elements between two intermediate nodes,
1396 * moving all Btree elements from one node into the other.
1399 xfs_da3_node_unbalance(
1400 struct xfs_da_state *state,
1401 struct xfs_da_state_blk *drop_blk,
1402 struct xfs_da_state_blk *save_blk)
1404 struct xfs_da_intnode *drop_node;
1405 struct xfs_da_intnode *save_node;
1406 struct xfs_da_node_entry *drop_btree;
1407 struct xfs_da_node_entry *save_btree;
1408 struct xfs_da3_icnode_hdr drop_hdr;
1409 struct xfs_da3_icnode_hdr save_hdr;
1410 struct xfs_trans *tp;
1413 struct xfs_inode *dp = state->args->dp;
1415 trace_xfs_da_node_unbalance(state->args);
1417 drop_node = drop_blk->bp->b_addr;
1418 save_node = save_blk->bp->b_addr;
1419 dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
1420 dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
1421 drop_btree = dp->d_ops->node_tree_p(drop_node);
1422 save_btree = dp->d_ops->node_tree_p(save_node);
1423 tp = state->args->trans;
1426 * If the dying block has lower hashvals, then move all the
1427 * elements in the remaining block up to make a hole.
1429 if ((be32_to_cpu(drop_btree[0].hashval) <
1430 be32_to_cpu(save_btree[0].hashval)) ||
1431 (be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
1432 be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
1433 /* XXX: check this - is memmove dst correct? */
1434 tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
1435 memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
1438 xfs_trans_log_buf(tp, save_blk->bp,
1439 XFS_DA_LOGRANGE(save_node, &save_btree[0],
1440 (save_hdr.count + drop_hdr.count) *
1441 sizeof(xfs_da_node_entry_t)));
1443 sindex = save_hdr.count;
1444 xfs_trans_log_buf(tp, save_blk->bp,
1445 XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
1446 drop_hdr.count * sizeof(xfs_da_node_entry_t)));
1450 * Move all the B-tree elements from drop_blk to save_blk.
1452 tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
1453 memcpy(&save_btree[sindex], &drop_btree[0], tmp);
1454 save_hdr.count += drop_hdr.count;
1456 dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
1457 xfs_trans_log_buf(tp, save_blk->bp,
1458 XFS_DA_LOGRANGE(save_node, &save_node->hdr,
1459 dp->d_ops->node_hdr_size));
1462 * Save the last hashval in the remaining block for upward propagation.
1464 save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
1467 /*========================================================================
1468 * Routines used for finding things in the Btree.
1469 *========================================================================*/
1472 * Walk down the Btree looking for a particular filename, filling
1473 * in the state structure as we go.
1475 * We will set the state structure to point to each of the elements
1476 * in each of the nodes where either the hashval is or should be.
1478 * We support duplicate hashvals, so for each entry in the current
1479 * node that could contain the desired hashval, descend. This is a
1480 * pruned depth-first tree search.
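/*
 * Example (illustration only): if a node's entries carry the hashvals
 * { 10, 20, 20, 20, 30 } and we are looking up 20, the binary search plus
 * the "find the first matching hashval" fixup below leave us on the first
 * 20 and we descend through that child; if the entry is not found there
 * and the leaf ends in hashval 20, xfs_da3_path_shift() moves the path
 * sideways so later blocks with the same hashval are searched too.
 */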
1483 xfs_da3_node_lookup_int(
1484 struct xfs_da_state *state,
1487 struct xfs_da_state_blk *blk;
1488 struct xfs_da_blkinfo *curr;
1489 struct xfs_da_intnode *node;
1490 struct xfs_da_node_entry *btree;
1491 struct xfs_da3_icnode_hdr nodehdr;
1492 struct xfs_da_args *args;
1494 xfs_dahash_t hashval;
1495 xfs_dahash_t btreehashval;
1501 unsigned int expected_level = 0;
1503 struct xfs_inode *dp = state->args->dp;
1508 * Descend thru the B-tree searching each level for the right
1509 * node to use, until the right hashval is found.
1511 blkno = args->geo->leafblk;
1512 for (blk = &state->path.blk[0], state->path.active = 1;
1513 state->path.active <= XFS_DA_NODE_MAXDEPTH;
1514 blk++, state->path.active++) {
1516 * Read the next node down in the tree.
1519 error = xfs_da3_node_read(args->trans, args->dp, blkno,
1520 -1, &blk->bp, args->whichfork);
1523 state->path.active--;
1526 curr = blk->bp->b_addr;
1527 magic = be16_to_cpu(curr->magic);
1529 if (magic == XFS_ATTR_LEAF_MAGIC ||
1530 magic == XFS_ATTR3_LEAF_MAGIC) {
1531 blk->magic = XFS_ATTR_LEAF_MAGIC;
1532 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1536 if (magic == XFS_DIR2_LEAFN_MAGIC ||
1537 magic == XFS_DIR3_LEAFN_MAGIC) {
1538 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1539 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
1544 if (magic != XFS_DA_NODE_MAGIC && magic != XFS_DA3_NODE_MAGIC)
1545 return -EFSCORRUPTED;
1547 blk->magic = XFS_DA_NODE_MAGIC;
1550 * Search an intermediate node for a match.
1552 node = blk->bp->b_addr;
1553 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1554 btree = dp->d_ops->node_tree_p(node);
1556 /* Tree taller than we can handle; bail out! */
1557 if (nodehdr.level >= XFS_DA_NODE_MAXDEPTH)
1558 return -EFSCORRUPTED;
1560 /* Check the level from the root. */
1561 if (blkno == args->geo->leafblk)
1562 expected_level = nodehdr.level - 1;
1563 else if (expected_level != nodehdr.level)
1564 return -EFSCORRUPTED;
1568 max = nodehdr.count;
1569 blk->hashval = be32_to_cpu(btree[max - 1].hashval);
1572 * Binary search. (note: small blocks will skip loop)
1574 probe = span = max / 2;
1575 hashval = args->hashval;
1578 btreehashval = be32_to_cpu(btree[probe].hashval);
1579 if (btreehashval < hashval)
1581 else if (btreehashval > hashval)
1586 ASSERT((probe >= 0) && (probe < max));
1587 ASSERT((span <= 4) ||
1588 (be32_to_cpu(btree[probe].hashval) == hashval));
1591 * Since we may have duplicate hashvals, find the first
1592 * matching hashval in the node.
1595 be32_to_cpu(btree[probe].hashval) >= hashval) {
1598 while (probe < max &&
1599 be32_to_cpu(btree[probe].hashval) < hashval) {
1604 * Pick the right block to descend on.
1607 blk->index = max - 1;
1608 blkno = be32_to_cpu(btree[max - 1].before);
1611 blkno = be32_to_cpu(btree[probe].before);
1614 /* We can't point back to the root. */
1615 if (blkno == args->geo->leafblk)
1616 return -EFSCORRUPTED;
1619 if (expected_level != 0)
1620 return -EFSCORRUPTED;
1623 * A leaf block that ends in the hashval that we are interested in
1624 * (final hashval == search hashval) means that the next block may
1625 * contain more entries with the same hashval, shift upward to the
1626 * next leaf and keep searching.
1629 if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
1630 retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
1631 &blk->index, state);
1632 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1633 retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
1634 blk->index = args->index;
1635 args->blkno = blk->blkno;
1638 return -EFSCORRUPTED;
1640 if (((retval == -ENOENT) || (retval == -ENOATTR)) &&
1641 (blk->hashval == args->hashval)) {
1642 error = xfs_da3_path_shift(state, &state->path, 1, 1,
1648 } else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
1649 /* path_shift() gives ENOENT */
1659 /*========================================================================
1661 *========================================================================*/
1664 * Compare two intermediate nodes for "order".
1668 struct xfs_inode *dp,
1669 struct xfs_buf *node1_bp,
1670 struct xfs_buf *node2_bp)
1672 struct xfs_da_intnode *node1;
1673 struct xfs_da_intnode *node2;
1674 struct xfs_da_node_entry *btree1;
1675 struct xfs_da_node_entry *btree2;
1676 struct xfs_da3_icnode_hdr node1hdr;
1677 struct xfs_da3_icnode_hdr node2hdr;
1679 node1 = node1_bp->b_addr;
1680 node2 = node2_bp->b_addr;
1681 dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
1682 dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
1683 btree1 = dp->d_ops->node_tree_p(node1);
1684 btree2 = dp->d_ops->node_tree_p(node2);
1686 if (node1hdr.count > 0 && node2hdr.count > 0 &&
1687 ((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
1688 (be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
1689 be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
1696 * Link a new block into a doubly linked list of blocks (of whatever type).
1700 struct xfs_da_state *state,
1701 struct xfs_da_state_blk *old_blk,
1702 struct xfs_da_state_blk *new_blk)
1704 struct xfs_da_blkinfo *old_info;
1705 struct xfs_da_blkinfo *new_info;
1706 struct xfs_da_blkinfo *tmp_info;
1707 struct xfs_da_args *args;
1711 struct xfs_inode *dp = state->args->dp;
1714 * Set up environment.
1717 ASSERT(args != NULL);
1718 old_info = old_blk->bp->b_addr;
1719 new_info = new_blk->bp->b_addr;
1720 ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
1721 old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1722 old_blk->magic == XFS_ATTR_LEAF_MAGIC);
1724 switch (old_blk->magic) {
1725 case XFS_ATTR_LEAF_MAGIC:
1726 before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
1728 case XFS_DIR2_LEAFN_MAGIC:
1729 before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
1731 case XFS_DA_NODE_MAGIC:
1732 before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
1737 * Link blocks in appropriate order.
1741 * Link new block in before existing block.
1743 trace_xfs_da_link_before(args);
1744 new_info->forw = cpu_to_be32(old_blk->blkno);
1745 new_info->back = old_info->back;
1746 if (old_info->back) {
1747 error = xfs_da3_node_read(args->trans, dp,
1748 be32_to_cpu(old_info->back),
1749 -1, &bp, args->whichfork);
1753 tmp_info = bp->b_addr;
1754 ASSERT(tmp_info->magic == old_info->magic);
1755 ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
1756 tmp_info->forw = cpu_to_be32(new_blk->blkno);
1757 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1759 old_info->back = cpu_to_be32(new_blk->blkno);
1762 * Link new block in after existing block.
1764 trace_xfs_da_link_after(args);
1765 new_info->forw = old_info->forw;
1766 new_info->back = cpu_to_be32(old_blk->blkno);
1767 if (old_info->forw) {
1768 error = xfs_da3_node_read(args->trans, dp,
1769 be32_to_cpu(old_info->forw),
1770 -1, &bp, args->whichfork);
1774 tmp_info = bp->b_addr;
1775 ASSERT(tmp_info->magic == old_info->magic);
1776 ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
1777 tmp_info->back = cpu_to_be32(new_blk->blkno);
1778 xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
1780 old_info->forw = cpu_to_be32(new_blk->blkno);
1783 xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
1784 xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
1789 * Unlink a block from a doubly linked list of blocks.
1791 STATIC int /* error */
1793 struct xfs_da_state *state,
1794 struct xfs_da_state_blk *drop_blk,
1795 struct xfs_da_state_blk *save_blk)
1797 struct xfs_da_blkinfo *drop_info;
1798 struct xfs_da_blkinfo *save_info;
1799 struct xfs_da_blkinfo *tmp_info;
1800 struct xfs_da_args *args;
1805 * Set up environment.
1808 ASSERT(args != NULL);
1809 save_info = save_blk->bp->b_addr;
1810 drop_info = drop_blk->bp->b_addr;
1811 ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
1812 save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
1813 save_blk->magic == XFS_ATTR_LEAF_MAGIC);
1814 ASSERT(save_blk->magic == drop_blk->magic);
1815 ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
1816 (be32_to_cpu(save_info->back) == drop_blk->blkno));
1817 ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
1818 (be32_to_cpu(drop_info->back) == save_blk->blkno));
1821 * Unlink the leaf block from the doubly linked chain of leaves.
1823 if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
1824 trace_xfs_da_unlink_back(args);
1825 save_info->back = drop_info->back;
1826 if (drop_info->back) {
1827 error = xfs_da3_node_read(args->trans, args->dp,
1828 be32_to_cpu(drop_info->back),
1829 -1, &bp, args->whichfork);
1833 tmp_info = bp->b_addr;
1834 ASSERT(tmp_info->magic == save_info->magic);
1835 ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
1836 tmp_info->forw = cpu_to_be32(save_blk->blkno);
1837 xfs_trans_log_buf(args->trans, bp, 0,
1838 sizeof(*tmp_info) - 1);
1841 trace_xfs_da_unlink_forward(args);
1842 save_info->forw = drop_info->forw;
1843 if (drop_info->forw) {
1844 error = xfs_da3_node_read(args->trans, args->dp,
1845 be32_to_cpu(drop_info->forw),
1846 -1, &bp, args->whichfork);
1850 tmp_info = bp->b_addr;
1851 ASSERT(tmp_info->magic == save_info->magic);
1852 ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
1853 tmp_info->back = cpu_to_be32(save_blk->blkno);
1854 xfs_trans_log_buf(args->trans, bp, 0,
1855 sizeof(*tmp_info) - 1);
1859 xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
1864 * Move a path "forward" or "!forward" one block at the current level.
1866 * This routine will adjust a "path" to point to the next block
1867 * "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
1868 * Btree, including updating pointers to the intermediate nodes between
1869 * the new bottom and the root.
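/*
 * Usage sketch (illustrative): xfs_da3_node_lookup_int() above calls this
 * with forward == 1 when a lookup misses in a leaf whose last hashval
 * equals the search hashval, so the search continues in the next leaf
 * that may hold further entries with the same hashval.
 */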
1873 struct xfs_da_state *state,
1874 struct xfs_da_state_path *path,
1879 struct xfs_da_state_blk *blk;
1880 struct xfs_da_blkinfo *info;
1881 struct xfs_da_intnode *node;
1882 struct xfs_da_args *args;
1883 struct xfs_da_node_entry *btree;
1884 struct xfs_da3_icnode_hdr nodehdr;
1886 xfs_dablk_t blkno = 0;
1889 struct xfs_inode *dp = state->args->dp;
1891 trace_xfs_da_path_shift(state->args);
1894 * Roll up the Btree looking for the first block where our
1895 * current index is not at the edge of the block. Note that
1896 * we skip the bottom layer because we want the sibling block.
1899 ASSERT(args != NULL);
1900 ASSERT(path != NULL);
1901 ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
1902 level = (path->active-1) - 1; /* skip bottom layer in path */
1903 for (blk = &path->blk[level]; level >= 0; blk--, level--) {
1904 node = blk->bp->b_addr;
1905 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1906 btree = dp->d_ops->node_tree_p(node);
1908 if (forward && (blk->index < nodehdr.count - 1)) {
1910 blkno = be32_to_cpu(btree[blk->index].before);
1912 } else if (!forward && (blk->index > 0)) {
1914 blkno = be32_to_cpu(btree[blk->index].before);
1919 *result = -ENOENT; /* we're out of our tree */
1920 ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
1925 * Roll down the edge of the subtree until we reach the
1926 * same depth we were at originally.
1928 for (blk++, level++; level < path->active; blk++, level++) {
1930 * Read the next child block into a local buffer.
1932 error = xfs_da3_node_read(args->trans, dp, blkno, -1, &bp,
1938 * Release the old block (if it's dirty, the trans doesn't
1939 * actually let go) and swap the local buffer into the path
1940 * structure. This ensures failure of the above read doesn't set
1941 * a NULL buffer in an active slot in the path.
1944 xfs_trans_brelse(args->trans, blk->bp);
1948 info = blk->bp->b_addr;
1949 ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
1950 info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
1951 info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
1952 info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
1953 info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
1954 info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
1958 * Note: we flatten the magic number to a single type so we
1959 * don't have to compare against crc/non-crc types elsewhere.
1961 switch (be16_to_cpu(info->magic)) {
1962 case XFS_DA_NODE_MAGIC:
1963 case XFS_DA3_NODE_MAGIC:
1964 blk->magic = XFS_DA_NODE_MAGIC;
1965 node = (xfs_da_intnode_t *)info;
1966 dp->d_ops->node_hdr_from_disk(&nodehdr, node);
1967 btree = dp->d_ops->node_tree_p(node);
1968 blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
1972 blk->index = nodehdr.count - 1;
1973 blkno = be32_to_cpu(btree[blk->index].before);
1975 case XFS_ATTR_LEAF_MAGIC:
1976 case XFS_ATTR3_LEAF_MAGIC:
1977 blk->magic = XFS_ATTR_LEAF_MAGIC;
1978 ASSERT(level == path->active-1);
1980 blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
1982 case XFS_DIR2_LEAFN_MAGIC:
1983 case XFS_DIR3_LEAFN_MAGIC:
1984 blk->magic = XFS_DIR2_LEAFN_MAGIC;
1985 ASSERT(level == path->active-1);
1987 blk->hashval = xfs_dir2_leaf_lasthash(args->dp,
2000 /*========================================================================
2002 *========================================================================*/
2005 * Implement a simple hash on a character string.
2006 * Rotate the hash value by 7 bits, then XOR each character in.
2007 * This is implemented with some source-level loop unrolling.
2010 xfs_da_hashname(const uint8_t *name, int namelen)
2015 * Do four characters at a time as long as we can.
2017 for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
2018 hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
2019 (name[3] << 0) ^ rol32(hash, 7 * 4);
2022 * Now do the rest of the characters.
2026 return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
2029 return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
2031 return (name[0] << 0) ^ rol32(hash, 7 * 1);
2032 default: /* case 0: */
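/*
 * Illustrative sketch, not part of the original source; rol32() is the
 * kernel's rotate-left, everything else is hypothetical scaffolding.
 * For a short name such as "xfs" (namelen == 3) the unrolled loop above
 * is skipped and the "case 3" arm yields:
 *
 *	hash = ('x' << 14) ^ ('f' << 7) ^ ('s' << 0) ^ rol32(0, 7 * 3);
 *
 * For longer names each four-byte chunk is folded the same way after
 * rotating the running hash left by 28 bits (7 * 4).
 */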
2039 struct xfs_da_args *args,
2040 const unsigned char *name,
2043 return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
2044 XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
2048 xfs_default_hashname(
2049 struct xfs_name *name)
2051 return xfs_da_hashname(name->name, name->len);
2054 const struct xfs_nameops xfs_default_nameops = {
2055 .hashname = xfs_default_hashname,
2056 .compname = xfs_da_compname
2060 xfs_da_grow_inode_int(
2061 struct xfs_da_args *args,
2065 struct xfs_trans *tp = args->trans;
2066 struct xfs_inode *dp = args->dp;
2067 int w = args->whichfork;
2068 xfs_rfsblock_t nblks = dp->i_d.di_nblocks;
2069 struct xfs_bmbt_irec map, *mapp;
2070 int nmap, error, got, i, mapi;
2073 * Find a spot in the file space to put the new block.
2075 error = xfs_bmap_first_unused(tp, dp, count, bno, w);
2080 * Try mapping it in one filesystem block.
2083 error = xfs_bmapi_write(tp, dp, *bno, count,
2084 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
2085 args->total, &map, &nmap);
2093 } else if (nmap == 0 && count > 1) {
2098 * If we didn't get it and the block might work if fragmented,
2099 * try without the CONTIG flag. Loop until we get it all.
2101 mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
2102 for (b = *bno, mapi = 0; b < *bno + count; ) {
2103 nmap = min(XFS_BMAP_MAX_NMAP, count);
2104 c = (int)(*bno + count - b);
2105 error = xfs_bmapi_write(tp, dp, b, c,
2106 xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
2107 args->total, &mapp[mapi], &nmap);
2113 b = mapp[mapi - 1].br_startoff +
2114 mapp[mapi - 1].br_blockcount;
2122 * Count the blocks we got, make sure it matches the total.
2124 for (i = 0, got = 0; i < mapi; i++)
2125 got += mapp[i].br_blockcount;
2126 if (got != count || mapp[0].br_startoff != *bno ||
2127 mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
2133 /* account for newly allocated blocks in reserved blocks total */
2134 args->total -= dp->i_d.di_nblocks - nblks;
2143 * Add a block to the btree ahead of the file.
2144 * Return the new block number to the caller.
2148 struct xfs_da_args *args,
2149 xfs_dablk_t *new_blkno)
2154 trace_xfs_da_grow_inode(args);
2156 bno = args->geo->leafblk;
2157 error = xfs_da_grow_inode_int(args, &bno, args->geo->fsbcount);
2159 *new_blkno = (xfs_dablk_t)bno;
2164 * Ick. We need to always be able to remove a btree block, even
2165 * if there's no space reservation because the filesystem is full.
2166 * This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
2167 * It swaps the target block with the last block in the file. The
2168 * last block in the file can always be removed since it can't cause
2169 * a bmap btree split to do that.
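/*
 * Sketch of the steps performed below (illustration only): the last
 * block's contents are copied over the dead block, the moved block's
 * siblings and its parent entry are re-pointed at the dead block's
 * location, and the caller is handed back what used to be the last block
 * so that it can be unmapped from the end of the file instead.
 */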
2172 xfs_da3_swap_lastblock(
2173 struct xfs_da_args *args,
2174 xfs_dablk_t *dead_blknop,
2175 struct xfs_buf **dead_bufp)
2177 struct xfs_da_blkinfo *dead_info;
2178 struct xfs_da_blkinfo *sib_info;
2179 struct xfs_da_intnode *par_node;
2180 struct xfs_da_intnode *dead_node;
2181 struct xfs_dir2_leaf *dead_leaf2;
2182 struct xfs_da_node_entry *btree;
2183 struct xfs_da3_icnode_hdr par_hdr;
2184 struct xfs_inode *dp;
2185 struct xfs_trans *tp;
2186 struct xfs_mount *mp;
2187 struct xfs_buf *dead_buf;
2188 struct xfs_buf *last_buf;
2189 struct xfs_buf *sib_buf;
2190 struct xfs_buf *par_buf;
2191 xfs_dahash_t dead_hash;
2192 xfs_fileoff_t lastoff;
2193 xfs_dablk_t dead_blkno;
2194 xfs_dablk_t last_blkno;
2195 xfs_dablk_t sib_blkno;
2196 xfs_dablk_t par_blkno;
2203 trace_xfs_da_swap_lastblock(args);
2205 dead_buf = *dead_bufp;
2206 dead_blkno = *dead_blknop;
2209 w = args->whichfork;
2210 ASSERT(w == XFS_DATA_FORK);
2212 lastoff = args->geo->freeblk;
2213 error = xfs_bmap_last_before(tp, dp, &lastoff, w);
2216 if (unlikely(lastoff == 0)) {
2217 XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
2219 return -EFSCORRUPTED;
2222 * Read the last block in the btree space.
2224 last_blkno = (xfs_dablk_t)lastoff - args->geo->fsbcount;
2225 error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
2229 * Copy the last block into the dead buffer and log it.
2231 memcpy(dead_buf->b_addr, last_buf->b_addr, args->geo->blksize);
2232 xfs_trans_log_buf(tp, dead_buf, 0, args->geo->blksize - 1);
2233 dead_info = dead_buf->b_addr;
2235 * Get values from the moved block.
2237 if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
2238 dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
2239 struct xfs_dir3_icleaf_hdr leafhdr;
2240 struct xfs_dir2_leaf_entry *ents;
2242 dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
2243 dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
2244 ents = dp->d_ops->leaf_ents_p(dead_leaf2);
2246 dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
2248 struct xfs_da3_icnode_hdr deadhdr;
2250 dead_node = (xfs_da_intnode_t *)dead_info;
2251 dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
2252 btree = dp->d_ops->node_tree_p(dead_node);
2253 dead_level = deadhdr.level;
2254 dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
2256 sib_buf = par_buf = NULL;
2258 * If the moved block has a left sibling, fix up the pointers.
2260 if ((sib_blkno = be32_to_cpu(dead_info->back))) {
2261 error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
2264 sib_info = sib_buf->b_addr;
2266 be32_to_cpu(sib_info->forw) != last_blkno ||
2267 sib_info->magic != dead_info->magic)) {
2268 XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
2269 XFS_ERRLEVEL_LOW, mp);
2270 error = -EFSCORRUPTED;
2273 sib_info->forw = cpu_to_be32(dead_blkno);
2274 xfs_trans_log_buf(tp, sib_buf,
2275 XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
2276 sizeof(sib_info->forw)));
	/*
	 * If the moved block has a right sibling, fix up the pointers.
	 */
	if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
		error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
		if (error)
			goto done;
		sib_info = sib_buf->b_addr;
		if (unlikely(
		    be32_to_cpu(sib_info->back) != last_blkno ||
		    sib_info->magic != dead_info->magic)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		sib_info->back = cpu_to_be32(dead_blkno);
		xfs_trans_log_buf(tp, sib_buf,
			XFS_DA_LOGRANGE(sib_info, &sib_info->back,
					sizeof(sib_info->back)));
		sib_buf = NULL;
	}
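	/*
	 * Any sibling links now point at dead_blkno.  What remains is the
	 * parent node entry that still refers to last_blkno; walk down from
	 * the root to find it.
	 */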
	par_blkno = args->geo->leafblk;
	level = -1;
	/*
	 * Walk down the tree looking for the parent of the moved block.
	 */
	for (;;) {
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (level >= 0 && level != par_hdr.level + 1) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		level = par_hdr.level;
		btree = dp->d_ops->node_tree_p(par_node);
		for (entno = 0;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].hashval) < dead_hash;
		     entno++)
			continue;
		if (entno == par_hdr.count) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		par_blkno = be32_to_cpu(btree[entno].before);
		if (level == dead_level + 1)
			break;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
	}
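	/*
	 * par_buf now holds a node at the parent level (dead_level + 1) and
	 * entno is the first entry with a hashval >= dead_hash.  The entry
	 * that points at last_blkno may still be further to the right,
	 * possibly in a sibling node, which is what the scan below handles.
	 */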
	/*
	 * We're in the right parent block.
	 * Look for the right entry.
	 */
	for (;;) {
		for (;
		     entno < par_hdr.count &&
		     be32_to_cpu(btree[entno].before) != last_blkno;
		     entno++)
			continue;
		if (entno < par_hdr.count)
			break;
		par_blkno = par_hdr.forw;
		xfs_trans_brelse(tp, par_buf);
		par_buf = NULL;
		if (unlikely(par_blkno == 0)) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
		if (error)
			goto done;
		par_node = par_buf->b_addr;
		dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
		if (par_hdr.level != level) {
			XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
					 XFS_ERRLEVEL_LOW, mp);
			error = -EFSCORRUPTED;
			goto done;
		}
		btree = dp->d_ops->node_tree_p(par_node);
		entno = 0;
	}
	/*
	 * Update the parent entry pointing to the moved block.
	 */
	btree[entno].before = cpu_to_be32(dead_blkno);
	xfs_trans_log_buf(tp, par_buf,
		XFS_DA_LOGRANGE(par_node, &btree[entno].before,
				sizeof(btree[entno].before)));
	*dead_blknop = last_blkno;
	*dead_bufp = last_buf;
	return 0;
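	/*
	 * Error exit: release whatever buffers are still held.  par_buf and
	 * sib_buf may or may not have been read by the time we get here, so
	 * only release them if they were; last_buf was read above and is
	 * always released.
	 */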
done:
	if (par_buf)
		xfs_trans_brelse(tp, par_buf);
	if (sib_buf)
		xfs_trans_brelse(tp, sib_buf);
	xfs_trans_brelse(tp, last_buf);
	return error;
}

/*
 * Remove a btree block from a directory or attribute.
 */
int
xfs_da_shrink_inode(
	struct xfs_da_args	*args,
	xfs_dablk_t		dead_blkno,
	struct xfs_buf		*dead_buf)
{
	struct xfs_inode	*dp;
	int			done, error, w, count;
	struct xfs_trans	*tp;

	trace_xfs_da_shrink_inode(args);

	dp = args->dp;
	w = args->whichfork;
	tp = args->trans;
	count = args->geo->fsbcount;
	for (;;) {
		/*
		 * Remove extents.  If we get ENOSPC for a dir we have to move
		 * the last block to the place we want to kill.
		 */
		error = xfs_bunmapi(tp, dp, dead_blkno, count,
				    xfs_bmapi_aflag(w), 0, &done);
		if (error == -ENOSPC) {
			if (w != XFS_DATA_FORK)
				break;
			error = xfs_da3_swap_lastblock(args, &dead_blkno,
						       &dead_buf);
			if (error)
				break;
		} else {
			break;
		}
	}
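	/*
	 * xfs_trans_binval() marks the buffer stale, so the contents of the
	 * removed block are never written back to disk.
	 */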
	xfs_trans_binval(tp, dead_buf);
	return error;
}

/*
 * See if the mapping(s) for this btree block are valid, i.e.
 * don't contain holes, are logically contiguous, and cover the whole range.
 */
STATIC int
xfs_da_map_covers_blocks(
	int		nmap,
	xfs_bmbt_irec_t	*mapp,
	xfs_dablk_t	bno,
	int		count)
{
	xfs_fileoff_t	off;
	int		i;

	for (i = 0, off = bno; i < nmap; i++) {
		if (mapp[i].br_startblock == HOLESTARTBLOCK ||
		    mapp[i].br_startblock == DELAYSTARTBLOCK) {
			return 0;
		}
		if (off != mapp[i].br_startoff) {
			return 0;
		}
		off += mapp[i].br_blockcount;
	}
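	/*
	 * Every extent was hole-free and logically contiguous, so the
	 * mappings cover the requested range iff the walk ended exactly at
	 * bno + count blocks.
	 */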
	return off == bno + count;
}

/*
 * Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
 *
 * For the single map case, it is assumed that the caller has provided a pointer
 * to a valid xfs_buf_map.  For the multiple map case, this function will
 * allocate the xfs_buf_map to hold all the maps and replace the caller's single
 * map pointer with the allocated map.
 */
static int
xfs_buf_map_from_irec(
	struct xfs_mount	*mp,
	struct xfs_buf_map	**mapp,
	int			*nmaps,
	struct xfs_bmbt_irec	*irecs,
	int			nirecs)
{
	struct xfs_buf_map	*map;
	int			i;

	ASSERT(*nmaps == 1);
	ASSERT(nirecs >= 1);

	if (nirecs > 1) {
		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
				  KM_SLEEP | KM_NOFS);
		if (!map)
			return -ENOMEM;
		*mapp = map;
	}

	*nmaps = nirecs;
	map = *mapp;
	for (i = 0; i < *nmaps; i++) {
		ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
		       irecs[i].br_startblock != HOLESTARTBLOCK);
		map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
		map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
	}
	return 0;
}

/*
 * Map the block we are given ready for reading. There are three possible return
 * values:
 *	-1 - will be returned if we land in a hole and mappedbno == -2 so the
 *	     caller knows not to execute a subsequent read.
 *	 0 - if we mapped the block successfully
 *	>0 - positive error number if there was an error.
 */
static int
xfs_dabuf_map(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	struct xfs_buf_map	**map,
	int			*nmaps)
{
	struct xfs_mount	*mp = dp->i_mount;
	int			nfsb;
	int			error = 0;
	struct xfs_bmbt_irec	irec;
	struct xfs_bmbt_irec	*irecs = &irec;
	int			nirecs;

	ASSERT(map && *map);
	ASSERT(*nmaps == 1);

	if (whichfork == XFS_DATA_FORK)
		nfsb = mp->m_dir_geo->fsbcount;
	else
		nfsb = mp->m_attr_geo->fsbcount;
	/*
	 * Caller doesn't have a mapping.  -2 means don't complain
	 * if we land in a hole.
	 */
	if (mappedbno == -1 || mappedbno == -2) {
		/*
		 * Optimize the one-block case.
		 */
		if (nfsb != 1)
			irecs = kmem_zalloc(sizeof(irec) * nfsb,
					    KM_SLEEP | KM_NOFS);

		nirecs = nfsb;
		error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
				       &nirecs, xfs_bmapi_aflag(whichfork));
		if (error)
			goto out;
	} else {
		irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
		irecs->br_startoff = (xfs_fileoff_t)bno;
		irecs->br_blockcount = nfsb;
		irecs->br_state = 0;
		nirecs = 1;
	}
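	/*
	 * Whichever path we took, irecs/nirecs now describe the candidate
	 * mapping; make sure it covers the whole [bno, bno + nfsb) range
	 * with no holes before building the buffer map from it.
	 */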
	if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
		error = mappedbno == -2 ? -1 : -EFSCORRUPTED;
		if (unlikely(error == -EFSCORRUPTED)) {
			if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
				int i;
				xfs_alert(mp, "%s: bno %lld dir: inode %lld",
					__func__, (long long)bno,
					(long long)dp->i_ino);
				for (i = 0; i < *nmaps; i++) {
					xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
						i,
						(long long)irecs[i].br_startoff,
						(long long)irecs[i].br_startblock,
						(long long)irecs[i].br_blockcount,
						irecs[i].br_state);
				}
			}
			XFS_ERROR_REPORT("xfs_da_do_buf(1)",
					 XFS_ERRLEVEL_LOW, mp);
		}
		goto out;
	}
	error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
	if (irecs != &irec)
		kmem_free(irecs);
	return error;
}

/*
 * Get a buffer for the dir/attr block.
 */
int
xfs_da_get_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}
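	/*
	 * Note: xfs_trans_get_buf_map() only locks and returns the buffer;
	 * it does not read the block contents from disk, since callers of
	 * xfs_da_get_buf() are about to initialise a freshly allocated
	 * block themselves.
	 */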
	bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
				    mapp, nmap, 0);
	error = bp ? bp->b_error : -EIO;
	if (error) {
		if (bp)
			xfs_trans_brelse(trans, bp);
		goto out_free;
	}

	*bpp = bp;

out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Get a buffer for the dir/attr block, fill in the contents.
 */
int
xfs_da_read_buf(
	struct xfs_trans	*trans,
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	struct xfs_buf		**bpp,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	*bpp = NULL;
	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	error = xfs_trans_read_buf_map(dp->i_mount, trans,
					dp->i_mount->m_ddev_targp,
					mapp, nmap, 0, &bp, ops);
	if (error)
		goto out_free;
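	/*
	 * Set the buffer's LRU reference hint so the cache knows this is
	 * dir or attr btree metadata when deciding what to reclaim first.
	 */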
	if (whichfork == XFS_ATTR_FORK)
		xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
	else
		xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
	*bpp = bp;
out_free:
	if (mapp != &map)
		kmem_free(mapp);

	return error;
}

/*
 * Readahead the dir/attr block.
 */
int
xfs_da_reada_buf(
	struct xfs_inode	*dp,
	xfs_dablk_t		bno,
	xfs_daddr_t		mappedbno,
	int			whichfork,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf_map	map;
	struct xfs_buf_map	*mapp;
	int			nmap;
	int			error;

	mapp = &map;
	nmap = 1;
	error = xfs_dabuf_map(dp, bno, mappedbno, whichfork,
				&mapp, &nmap);
	if (error) {
		/* mapping a hole is not an error, but we don't continue */
		if (error == -1)
			error = 0;
		goto out_free;
	}

	mappedbno = mapp[0].bm_bn;
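	/*
	 * The readahead is purely advisory: the I/O is asynchronous and any
	 * error is dropped here, to be noticed by a later blocking read of
	 * the same block.
	 */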
	xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);