1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2008 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/blkdev.h>
9 #include <linux/list_sort.h>
10 #include <linux/iversion.h>
15 #include "print-tree.h"
17 #include "compression.h"
19 #include "inode-map.h"
21 /* magic values for the inode_only field in btrfs_log_inode:
23 * LOG_INODE_ALL means to log everything
24 * LOG_INODE_EXISTS means to log just enough to recreate the inode
27 #define LOG_INODE_ALL 0
28 #define LOG_INODE_EXISTS 1
29 #define LOG_OTHER_INODE 2
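/*
 * Added note (a rough summary, not part of the original comment): a plain
 * fsync of a regular file typically logs that inode with LOG_INODE_ALL,
 * while ancestor directories may only be logged with LOG_INODE_EXISTS so
 * they can be recreated after a crash. LOG_OTHER_INODE is used when we log
 * an inode other than the one the fsync was issued against (for example one
 * found through a conflicting reference); it behaves like LOG_INODE_EXISTS
 * but is handled specially with respect to the inode's log_mutex locking.
 */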
32 * directory trouble cases
34 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
35 * log, we must force a full commit before doing an fsync of the directory
36 * where the unlink was done.
37 * ---> record transid of last unlink/rename per directory
41 * rename foo/some_dir foo2/some_dir
43 * fsync foo/some_dir/some_file
45 * The fsync above will unlink the original some_dir without recording
46 * it in its new location (foo2). After a crash, some_dir will be gone
47 * unless the fsync of some_file forces a full commit
49 * 2) we must log any new names for any file or dir that is in the fsync
50 * log. ---> check inode while renaming/linking.
52 * 2a) we must log any new names for any file or dir during rename
53 * when the directory they are being removed from was logged.
54 * ---> check inode and old parent dir during rename
56 * 2a is actually the more important variant. Without the extra logging
57 * a crash might unlink the old name without recreating the new one
59 * 3) after a crash, we must go through any directories with a link count
60 * of zero and redo the rm -rf
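 *
 * An illustrative sequence (reconstructed here, not necessarily the original
 * example; "dir" is a made up parent directory):
 *
 *   mkdir dir/f1 ; fill it with files
 *   <transaction commit>
 *   rm -rf dir/f1
 *   fsync(dir)
 *   <crash>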
67 * The directory f1 was fully removed from the FS, but fsync was never
68 * called on f1, only its parent dir. After a crash the rm -rf must
69 * be replayed. This must be able to recurse down the entire
70 * directory tree. The inode link count fixup code takes care of the
75 * stages for the tree walking. The first
76 * stage (0) is to only pin down the blocks we find;
77 * the second stage (1) is to make sure that all the inodes
78 * we find in the log are created in the subvolume.
80 * The last stage is to deal with directories and links and extents
81 * and all the other fun semantics
83 #define LOG_WALK_PIN_ONLY 0
84 #define LOG_WALK_REPLAY_INODES 1
85 #define LOG_WALK_REPLAY_DIR_INDEX 2
86 #define LOG_WALK_REPLAY_ALL 3
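/*
 * Rough illustration (an added note, not from the original source): log
 * replay walks each log tree once per stage, in increasing order of the
 * values above. The pin-only pass records the extents the log is using,
 * and the replay stages then run with replay_one_buffer() as the
 * process_func:
 *
 *   LOG_WALK_PIN_ONLY -> LOG_WALK_REPLAY_INODES ->
 *   LOG_WALK_REPLAY_DIR_INDEX -> LOG_WALK_REPLAY_ALL
 */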
88 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
89 struct btrfs_root *root, struct btrfs_inode *inode,
93 struct btrfs_log_ctx *ctx);
94 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
95 struct btrfs_root *root,
96 struct btrfs_path *path, u64 objectid);
97 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
98 struct btrfs_root *root,
99 struct btrfs_root *log,
100 struct btrfs_path *path,
101 u64 dirid, int del_all);
104 * tree logging is a special write ahead log used to make sure that
105 * fsyncs and O_SYNCs can happen without doing full tree commits.
107 * Full tree commits are expensive because they require commonly
108 * modified blocks to be recowed, creating many dirty pages in the
109 * extent tree and a 4x-6x higher write load than ext3.
111 * Instead of doing a tree commit on every fsync, we use the
112 * key ranges and transaction ids to find items for a given file or directory
113 * that have changed in this transaction. Those items are copied into
114 * a special tree (one per subvolume root), that tree is written to disk
115 * and then the fsync is considered complete.
117 * After a crash, items are copied out of the log-tree back into the
118 * subvolume tree. Any file data extents found are recorded in the extent
119 * allocation tree, and the log-tree freed.
121 * The log tree is read three times: once to pin down all the extents it is
122 * using in ram, once to create all the inodes logged in the tree
123 * and once to do all the other items.
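 *
 * As a rough sketch of the fast path (an added illustration; the function
 * names are from memory and may differ between kernel versions):
 *
 *   fsync(fd)
 *     -> btrfs_sync_file()
 *       -> btrfs_log_dentry_safe()  copy the changed items into the log tree
 *       -> btrfs_sync_log()         write out the log tree and wait for it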
127 * start a sub transaction and setup the log tree
128 * this increments the log tree writer count to make the people
129 * syncing the tree wait for us to finish
131 static int start_log_trans(struct btrfs_trans_handle *trans,
132 struct btrfs_root *root,
133 struct btrfs_log_ctx *ctx)
135 struct btrfs_fs_info *fs_info = root->fs_info;
138 mutex_lock(&root->log_mutex);
140 if (root->log_root) {
141 if (btrfs_need_log_full_commit(fs_info, trans)) {
146 if (!root->log_start_pid) {
147 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
148 root->log_start_pid = current->pid;
149 } else if (root->log_start_pid != current->pid) {
150 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
153 mutex_lock(&fs_info->tree_log_mutex);
154 if (!fs_info->log_root_tree)
155 ret = btrfs_init_log_root_tree(trans, fs_info);
156 mutex_unlock(&fs_info->tree_log_mutex);
160 ret = btrfs_add_log_tree(trans, root);
164 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
165 root->log_start_pid = current->pid;
168 atomic_inc(&root->log_batch);
169 atomic_inc(&root->log_writers);
171 int index = root->log_transid % 2;
172 list_add_tail(&ctx->list, &root->log_ctxs[index]);
173 ctx->log_transid = root->log_transid;
177 mutex_unlock(&root->log_mutex);
182 * returns 0 if there was a log transaction running and we were able
183 * to join, or returns -ENOENT if there were no transactions
186 static int join_running_log_trans(struct btrfs_root *root)
194 mutex_lock(&root->log_mutex);
195 if (root->log_root) {
197 atomic_inc(&root->log_writers);
199 mutex_unlock(&root->log_mutex);
204 * This either makes the current running log transaction wait
205 * until you call btrfs_end_log_trans() or it makes any future
206 * log transactions wait until you call btrfs_end_log_trans()
208 void btrfs_pin_log_trans(struct btrfs_root *root)
210 mutex_lock(&root->log_mutex);
211 atomic_inc(&root->log_writers);
212 mutex_unlock(&root->log_mutex);
216 * indicate we're done making changes to the log tree
217 * and wake up anyone waiting to do a sync
219 void btrfs_end_log_trans(struct btrfs_root *root)
221 if (atomic_dec_and_test(&root->log_writers)) {
222 /* atomic_dec_and_test implies a barrier */
223 cond_wake_up_nomb(&root->log_writer_wait);
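/*
 * Added usage note (a sketch, not taken from the original source): the fsync
 * path brackets its logging with start_log_trans() and btrfs_end_log_trans(),
 * while for example the rename path pins the log with btrfs_pin_log_trans()
 * before removing the old name and only calls btrfs_end_log_trans() once the
 * new name has been dealt with, so a concurrent log sync cannot see the
 * intermediate state.
 */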
229 * the walk control struct is used to pass state down the chain when
230 * processing the log tree. The stage field tells us which part
231 * of the log tree processing we are currently doing. The others
232 * are state fields used for that specific part
234 struct walk_control {
235 /* should we free the extent on disk when done? This is used
236 * at transaction commit time while freeing a log tree
240 /* should we write out the extent buffer? This is used
241 * while flushing the log tree to disk during a sync
245 /* should we wait for the extent buffer io to finish? Also used
246 * while flushing the log tree to disk for a sync
250 /* pin only walk, we record which extents on disk belong to the
255 /* what stage of the replay code we're currently in */
259 * Ignore any items from the inode currently being processed. Needs
260 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
261 * the LOG_WALK_REPLAY_INODES stage.
263 bool ignore_cur_inode;
265 /* the root we are currently replaying */
266 struct btrfs_root *replay_dest;
268 /* the trans handle for the current replay */
269 struct btrfs_trans_handle *trans;
271 /* the function that gets used to process blocks we find in the
272 * tree. Note the extent_buffer might not be up to date when it is
273 * passed in, and it must be checked or read if you need the data
276 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
277 struct walk_control *wc, u64 gen, int level);
281 * process_func used to pin down extents, write them or wait on them
283 static int process_one_buffer(struct btrfs_root *log,
284 struct extent_buffer *eb,
285 struct walk_control *wc, u64 gen, int level)
287 struct btrfs_fs_info *fs_info = log->fs_info;
291 * If this fs is mixed then we need to be able to process the leaves to
292 * pin down any logged extents, so we have to read the block.
294 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
295 ret = btrfs_read_buffer(eb, gen, level, NULL);
301 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
304 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
305 if (wc->pin && btrfs_header_level(eb) == 0)
306 ret = btrfs_exclude_logged_extents(fs_info, eb);
308 btrfs_write_tree_block(eb);
310 btrfs_wait_tree_block_writeback(eb);
316 * Item overwrite used by replay and tree logging. eb, slot and key all refer
317 * to the src data we are copying out.
319 * root is the tree we are copying into, and path is a scratch
320 * path for use in this function (it should be released on entry and
321 * will be released on exit).
323 * If the key is already in the destination tree the existing item is
324 * overwritten. If the existing item isn't big enough, it is extended.
325 * If it is too large, it is truncated.
327 * If the key isn't in the destination yet, a new item is inserted.
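 *
 * For example (an added illustration): replaying a logged inode item over an
 * existing one of the same size just copies the new contents in place, a
 * logged xattr item that grew since the last sync goes through the extend
 * path, and a key that is missing entirely is inserted as a new item.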
329 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
330 struct btrfs_root *root,
331 struct btrfs_path *path,
332 struct extent_buffer *eb, int slot,
333 struct btrfs_key *key)
335 struct btrfs_fs_info *fs_info = root->fs_info;
338 u64 saved_i_size = 0;
339 int save_old_i_size = 0;
340 unsigned long src_ptr;
341 unsigned long dst_ptr;
342 int overwrite_root = 0;
343 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
345 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
348 item_size = btrfs_item_size_nr(eb, slot);
349 src_ptr = btrfs_item_ptr_offset(eb, slot);
351 /* look for the key in the destination tree */
352 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
359 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
361 if (dst_size != item_size)
364 if (item_size == 0) {
365 btrfs_release_path(path);
368 dst_copy = kmalloc(item_size, GFP_NOFS);
369 src_copy = kmalloc(item_size, GFP_NOFS);
370 if (!dst_copy || !src_copy) {
371 btrfs_release_path(path);
377 read_extent_buffer(eb, src_copy, src_ptr, item_size);
379 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
380 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
382 ret = memcmp(dst_copy, src_copy, item_size);
387 * they have the same contents, just return, this saves
388 * us from cowing blocks in the destination tree and doing
389 * extra writes that may not have been done by a previous
393 btrfs_release_path(path);
398 * We need to load the old nbytes into the inode so when we
399 * replay the extents we've logged we get the right nbytes.
402 struct btrfs_inode_item *item;
406 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
407 struct btrfs_inode_item);
408 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
409 item = btrfs_item_ptr(eb, slot,
410 struct btrfs_inode_item);
411 btrfs_set_inode_nbytes(eb, item, nbytes);
414 * If this is a directory we need to reset the i_size to
415 * 0 so that we can set it up properly when replaying
416 * the rest of the items in this log.
418 mode = btrfs_inode_mode(eb, item);
420 btrfs_set_inode_size(eb, item, 0);
422 } else if (inode_item) {
423 struct btrfs_inode_item *item;
427 * New inode, set nbytes to 0 so that the nbytes comes out
428 * properly when we replay the extents.
430 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
431 btrfs_set_inode_nbytes(eb, item, 0);
434 * If this is a directory we need to reset the i_size to 0 so
435 * that we can set it up properly when replaying the rest of
436 * the items in this log.
438 mode = btrfs_inode_mode(eb, item);
440 btrfs_set_inode_size(eb, item, 0);
443 btrfs_release_path(path);
444 /* try to insert the key into the destination tree */
445 path->skip_release_on_error = 1;
446 ret = btrfs_insert_empty_item(trans, root, path,
448 path->skip_release_on_error = 0;
450 /* make sure any existing item is the correct size */
451 if (ret == -EEXIST || ret == -EOVERFLOW) {
453 found_size = btrfs_item_size_nr(path->nodes[0],
455 if (found_size > item_size)
456 btrfs_truncate_item(fs_info, path, item_size, 1);
457 else if (found_size < item_size)
458 btrfs_extend_item(fs_info, path,
459 item_size - found_size);
463 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
466 /* don't overwrite an existing inode if the generation number
467 * was logged as zero. This is done when the tree logging code
468 * is just logging an inode to make sure it exists after recovery.
470 * Also, don't overwrite i_size on directories during replay.
471 * log replay inserts and removes directory items based on the
472 * state of the tree found in the subvolume, and i_size is modified
475 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
476 struct btrfs_inode_item *src_item;
477 struct btrfs_inode_item *dst_item;
479 src_item = (struct btrfs_inode_item *)src_ptr;
480 dst_item = (struct btrfs_inode_item *)dst_ptr;
482 if (btrfs_inode_generation(eb, src_item) == 0) {
483 struct extent_buffer *dst_eb = path->nodes[0];
484 const u64 ino_size = btrfs_inode_size(eb, src_item);
487 * For regular files an ino_size == 0 is used only when
488 * logging that an inode exists, as part of a directory
489 * fsync, and the inode wasn't fsynced before. In this
490 * case don't set the size of the inode in the fs/subvol
491 * tree, otherwise we would be throwing valid data away.
493 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
494 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
496 struct btrfs_map_token token;
498 btrfs_init_map_token(&token);
499 btrfs_set_token_inode_size(dst_eb, dst_item,
505 if (overwrite_root &&
506 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
507 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
509 saved_i_size = btrfs_inode_size(path->nodes[0],
514 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
517 if (save_old_i_size) {
518 struct btrfs_inode_item *dst_item;
519 dst_item = (struct btrfs_inode_item *)dst_ptr;
520 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
523 /* make sure the generation is filled in */
524 if (key->type == BTRFS_INODE_ITEM_KEY) {
525 struct btrfs_inode_item *dst_item;
526 dst_item = (struct btrfs_inode_item *)dst_ptr;
527 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
528 btrfs_set_inode_generation(path->nodes[0], dst_item,
533 btrfs_mark_buffer_dirty(path->nodes[0]);
534 btrfs_release_path(path);
539 * simple helper to read an inode off the disk from a given root
540 * This can only be called for subvolume roots and not for the log
542 static noinline struct inode *read_one_inode(struct btrfs_root *root,
545 struct btrfs_key key;
548 key.objectid = objectid;
549 key.type = BTRFS_INODE_ITEM_KEY;
551 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
557 /* replays a single extent in 'eb' at 'slot' with 'key' into the
558 * subvolume 'root'. path is released on entry and should be released
561 * extents in the log tree have not been allocated out of the extent
562 * tree yet. So, this completes the allocation, taking a reference
563 * as required if the extent already exists or creating a new extent
564 * if it isn't in the extent allocation tree yet.
566 * The extent is inserted into the file, dropping any existing extents
567 * from the file that overlap the new one.
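 *
 * Rough flow for a logged regular extent (an added summary of the code
 * below): check whether the identical file extent item is already present,
 * drop any overlapping extents with btrfs_drop_extents(), insert the file
 * extent item, then either add a backref with btrfs_inc_extent_ref() if the
 * disk extent already exists or insert it with
 * btrfs_alloc_logged_file_extent(), and finally copy the checksums for the
 * extent's range out of the log tree into the csum tree.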
569 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
570 struct btrfs_root *root,
571 struct btrfs_path *path,
572 struct extent_buffer *eb, int slot,
573 struct btrfs_key *key)
575 struct btrfs_fs_info *fs_info = root->fs_info;
578 u64 start = key->offset;
580 struct btrfs_file_extent_item *item;
581 struct inode *inode = NULL;
585 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
586 found_type = btrfs_file_extent_type(eb, item);
588 if (found_type == BTRFS_FILE_EXTENT_REG ||
589 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
590 nbytes = btrfs_file_extent_num_bytes(eb, item);
591 extent_end = start + nbytes;
594 * We don't add to the inode's nbytes if we are prealloc or a
597 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
599 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
600 size = btrfs_file_extent_ram_bytes(eb, item);
601 nbytes = btrfs_file_extent_ram_bytes(eb, item);
602 extent_end = ALIGN(start + size,
603 fs_info->sectorsize);
609 inode = read_one_inode(root, key->objectid);
616 * first check to see if we already have this extent in the
617 * file. This must be done before the btrfs_drop_extents run
618 * so we don't try to drop this extent.
620 ret = btrfs_lookup_file_extent(trans, root, path,
621 btrfs_ino(BTRFS_I(inode)), start, 0);
624 (found_type == BTRFS_FILE_EXTENT_REG ||
625 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
626 struct btrfs_file_extent_item cmp1;
627 struct btrfs_file_extent_item cmp2;
628 struct btrfs_file_extent_item *existing;
629 struct extent_buffer *leaf;
631 leaf = path->nodes[0];
632 existing = btrfs_item_ptr(leaf, path->slots[0],
633 struct btrfs_file_extent_item);
635 read_extent_buffer(eb, &cmp1, (unsigned long)item,
637 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
641 * we already have a pointer to this exact extent,
642 * we don't have to do anything
644 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
645 btrfs_release_path(path);
649 btrfs_release_path(path);
651 /* drop any overlapping extents */
652 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
656 if (found_type == BTRFS_FILE_EXTENT_REG ||
657 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
659 unsigned long dest_offset;
660 struct btrfs_key ins;
662 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
663 btrfs_fs_incompat(fs_info, NO_HOLES))
666 ret = btrfs_insert_empty_item(trans, root, path, key,
670 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
672 copy_extent_buffer(path->nodes[0], eb, dest_offset,
673 (unsigned long)item, sizeof(*item));
675 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
676 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
677 ins.type = BTRFS_EXTENT_ITEM_KEY;
678 offset = key->offset - btrfs_file_extent_offset(eb, item);
681 * Manually record dirty extent, as here we did a shallow
682 * file extent item copy and skipped the normal backref update,
683 * modifying the extent tree all by ourselves.
684 * So we need to manually record the dirty extent for qgroup,
685 * as the owner of the file extent changed from the log tree
686 * (doesn't affect qgroup) to the fs/file tree (affects qgroup).
688 ret = btrfs_qgroup_trace_extent(trans,
689 btrfs_file_extent_disk_bytenr(eb, item),
690 btrfs_file_extent_disk_num_bytes(eb, item),
695 if (ins.objectid > 0) {
698 LIST_HEAD(ordered_sums);
700 * is this extent already allocated in the extent
701 * allocation tree? If so, just add a reference
703 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
706 ret = btrfs_inc_extent_ref(trans, root,
707 ins.objectid, ins.offset,
708 0, root->root_key.objectid,
709 key->objectid, offset);
714 * insert the extent pointer in the extent
717 ret = btrfs_alloc_logged_file_extent(trans,
718 root->root_key.objectid,
719 key->objectid, offset, &ins);
723 btrfs_release_path(path);
725 if (btrfs_file_extent_compression(eb, item)) {
726 csum_start = ins.objectid;
727 csum_end = csum_start + ins.offset;
729 csum_start = ins.objectid +
730 btrfs_file_extent_offset(eb, item);
731 csum_end = csum_start +
732 btrfs_file_extent_num_bytes(eb, item);
735 ret = btrfs_lookup_csums_range(root->log_root,
736 csum_start, csum_end - 1,
741 * Now delete all existing csums in the csum root that
742 * cover our range. We do this because we can have an
743 * extent that is completely referenced by one file
744 * extent item and partially referenced by another
745 * file extent item (like after using the clone or
746 * extent_same ioctls). In this case if we end up doing
747 * the replay of the one that partially references the
748 * extent first, and we do not do the csum deletion
749 * below, we can get 2 csum items in the csum tree that
750 * overlap each other. For example, imagine our log has
751 * the two following file extent items:
753 * key (257 EXTENT_DATA 409600)
754 * extent data disk byte 12845056 nr 102400
755 * extent data offset 20480 nr 20480 ram 102400
757 * key (257 EXTENT_DATA 819200)
758 * extent data disk byte 12845056 nr 102400
759 * extent data offset 0 nr 102400 ram 102400
761 * Where the second one fully references the 100K extent
762 * that starts at disk byte 12845056, and the log tree
763 * has a single csum item that covers the entire range
766 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
768 * After the first file extent item is replayed, the
769 * csum tree gets the following csum item:
771 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
773 * Which covers the 20K sub-range starting at offset 20K
774 * of our extent. Now when we replay the second file
775 * extent item, if we do not delete existing csum items
776 * that cover any of its blocks, we end up getting two
777 * csum items in our csum tree that overlap each other:
779 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
780 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
782 * Which is a problem, because after this anyone trying
783 * to look up the checksum of any block of our
784 * extent starting at an offset of 40K or higher, will
785 * end up looking at the second csum item only, which
786 * does not contain the checksum for any block starting
787 * at offset 40K or higher of our extent.
789 while (!list_empty(&ordered_sums)) {
790 struct btrfs_ordered_sum *sums;
791 sums = list_entry(ordered_sums.next,
792 struct btrfs_ordered_sum,
795 ret = btrfs_del_csums(trans, fs_info,
799 ret = btrfs_csum_file_blocks(trans,
800 fs_info->csum_root, sums);
801 list_del(&sums->list);
807 btrfs_release_path(path);
809 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
810 /* inline extents are easy, we just overwrite them */
811 ret = overwrite_item(trans, root, path, eb, slot, key);
816 inode_add_bytes(inode, nbytes);
818 ret = btrfs_update_inode(trans, root, inode);
826 * when cleaning up conflicts between the directory names in the
827 * subvolume, directory names in the log and directory names in the
828 * inode back references, we may have to unlink inodes from directories.
830 * This is a helper function to do the unlink of a specific directory
833 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
834 struct btrfs_root *root,
835 struct btrfs_path *path,
836 struct btrfs_inode *dir,
837 struct btrfs_dir_item *di)
842 struct extent_buffer *leaf;
843 struct btrfs_key location;
846 leaf = path->nodes[0];
848 btrfs_dir_item_key_to_cpu(leaf, di, &location);
849 name_len = btrfs_dir_name_len(leaf, di);
850 name = kmalloc(name_len, GFP_NOFS);
854 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
855 btrfs_release_path(path);
857 inode = read_one_inode(root, location.objectid);
863 ret = link_to_fixup_dir(trans, root, path, location.objectid);
867 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
872 ret = btrfs_run_delayed_items(trans);
880 * helper function to see if a given name and sequence number found
881 * in an inode back reference are already in a directory and correctly
882 * point to this inode
884 static noinline int inode_in_dir(struct btrfs_root *root,
885 struct btrfs_path *path,
886 u64 dirid, u64 objectid, u64 index,
887 const char *name, int name_len)
889 struct btrfs_dir_item *di;
890 struct btrfs_key location;
893 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
894 index, name, name_len, 0);
895 if (di && !IS_ERR(di)) {
896 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
897 if (location.objectid != objectid)
901 btrfs_release_path(path);
903 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
904 if (di && !IS_ERR(di)) {
905 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
906 if (location.objectid != objectid)
912 btrfs_release_path(path);
917 * helper function to check a log tree for a named back reference in
918 * an inode. This is used to decide if a back reference that is
919 * found in the subvolume conflicts with what we find in the log.
921 * inode backreferences may have multiple refs in a single item,
922 * during replay we process one reference at a time, and we don't
923 * want to delete valid links to a file from the subvolume if that
924 * link is also in the log.
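 *
 * For example (an added illustration): if the subvolume has an inode ref
 * item with the names "a" and "b" for the same parent directory but the log
 * only contains "a", then __add_inode_ref() keeps "a" and unlinks only "b",
 * because backref_in_log() succeeds for "a" and fails for "b".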
926 static noinline int backref_in_log(struct btrfs_root *log,
927 struct btrfs_key *key,
929 const char *name, int namelen)
931 struct btrfs_path *path;
932 struct btrfs_inode_ref *ref;
934 unsigned long ptr_end;
935 unsigned long name_ptr;
941 path = btrfs_alloc_path();
945 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
949 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
951 if (key->type == BTRFS_INODE_EXTREF_KEY) {
952 if (btrfs_find_name_in_ext_backref(path->nodes[0],
955 name, namelen, NULL))
961 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
962 ptr_end = ptr + item_size;
963 while (ptr < ptr_end) {
964 ref = (struct btrfs_inode_ref *)ptr;
965 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
966 if (found_name_len == namelen) {
967 name_ptr = (unsigned long)(ref + 1);
968 ret = memcmp_extent_buffer(path->nodes[0], name,
975 ptr = (unsigned long)(ref + 1) + found_name_len;
978 btrfs_free_path(path);
982 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
983 struct btrfs_root *root,
984 struct btrfs_path *path,
985 struct btrfs_root *log_root,
986 struct btrfs_inode *dir,
987 struct btrfs_inode *inode,
988 u64 inode_objectid, u64 parent_objectid,
989 u64 ref_index, char *name, int namelen,
995 struct extent_buffer *leaf;
996 struct btrfs_dir_item *di;
997 struct btrfs_key search_key;
998 struct btrfs_inode_extref *extref;
1001 /* Search old style refs */
1002 search_key.objectid = inode_objectid;
1003 search_key.type = BTRFS_INODE_REF_KEY;
1004 search_key.offset = parent_objectid;
1005 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1007 struct btrfs_inode_ref *victim_ref;
1009 unsigned long ptr_end;
1011 leaf = path->nodes[0];
1013 /* are we trying to overwrite a back ref for the root directory?
1014 * If so, just jump out, we're done
1016 if (search_key.objectid == search_key.offset)
1019 /* check all the names in this back reference to see
1020 * if they are in the log. if so, we allow them to stay
1021 * otherwise they must be unlinked as a conflict
1023 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1024 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1025 while (ptr < ptr_end) {
1026 victim_ref = (struct btrfs_inode_ref *)ptr;
1027 victim_name_len = btrfs_inode_ref_name_len(leaf,
1029 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1033 read_extent_buffer(leaf, victim_name,
1034 (unsigned long)(victim_ref + 1),
1037 if (!backref_in_log(log_root, &search_key,
1041 inc_nlink(&inode->vfs_inode);
1042 btrfs_release_path(path);
1044 ret = btrfs_unlink_inode(trans, root, dir, inode,
1045 victim_name, victim_name_len);
1049 ret = btrfs_run_delayed_items(trans);
1057 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1061 * NOTE: we have already searched the root tree and checked the
1062 * corresponding ref, so it does not need to be checked again.
1066 btrfs_release_path(path);
1068 /* Same search but for extended refs */
1069 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1070 inode_objectid, parent_objectid, 0,
1072 if (!IS_ERR_OR_NULL(extref)) {
1076 struct inode *victim_parent;
1078 leaf = path->nodes[0];
1080 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1081 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1083 while (cur_offset < item_size) {
1084 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1086 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1088 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1091 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1094 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1097 search_key.objectid = inode_objectid;
1098 search_key.type = BTRFS_INODE_EXTREF_KEY;
1099 search_key.offset = btrfs_extref_hash(parent_objectid,
1103 if (!backref_in_log(log_root, &search_key,
1104 parent_objectid, victim_name,
1107 victim_parent = read_one_inode(root,
1109 if (victim_parent) {
1110 inc_nlink(&inode->vfs_inode);
1111 btrfs_release_path(path);
1113 ret = btrfs_unlink_inode(trans, root,
1114 BTRFS_I(victim_parent),
1119 ret = btrfs_run_delayed_items(
1122 iput(victim_parent);
1131 cur_offset += victim_name_len + sizeof(*extref);
1135 btrfs_release_path(path);
1137 /* look for a conflicting sequence number */
1138 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1139 ref_index, name, namelen, 0);
1140 if (di && !IS_ERR(di)) {
1141 ret = drop_one_dir_item(trans, root, path, dir, di);
1145 btrfs_release_path(path);
1147 /* look for a conflicting name */
1148 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1150 if (di && !IS_ERR(di)) {
1151 ret = drop_one_dir_item(trans, root, path, dir, di);
1155 btrfs_release_path(path);
1160 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1161 u32 *namelen, char **name, u64 *index,
1162 u64 *parent_objectid)
1164 struct btrfs_inode_extref *extref;
1166 extref = (struct btrfs_inode_extref *)ref_ptr;
1168 *namelen = btrfs_inode_extref_name_len(eb, extref);
1169 *name = kmalloc(*namelen, GFP_NOFS);
1173 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1177 *index = btrfs_inode_extref_index(eb, extref);
1178 if (parent_objectid)
1179 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1184 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1185 u32 *namelen, char **name, u64 *index)
1187 struct btrfs_inode_ref *ref;
1189 ref = (struct btrfs_inode_ref *)ref_ptr;
1191 *namelen = btrfs_inode_ref_name_len(eb, ref);
1192 *name = kmalloc(*namelen, GFP_NOFS);
1196 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1199 *index = btrfs_inode_ref_index(eb, ref);
1205 * Take an inode reference item from the log tree and iterate all names from the
1206 * inode reference item in the subvolume tree with the same key (if it exists).
1207 * For any name that is not in the inode reference item from the log tree, do a
1208 * proper unlink of that name (that is, remove its entry from the inode
1209 * reference item and both dir index keys).
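 *
 * For example (an added illustration): if the subvolume's ref item for the
 * inode lists the names "foo" and "bar" under the same parent but the ref
 * item in the log only lists "foo", then "bar" was removed before the fsync
 * and is unlinked here before the log item is copied over the subvolume one.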
1211 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
1212 struct btrfs_root *root,
1213 struct btrfs_path *path,
1214 struct btrfs_inode *inode,
1215 struct extent_buffer *log_eb,
1217 struct btrfs_key *key)
1220 unsigned long ref_ptr;
1221 unsigned long ref_end;
1222 struct extent_buffer *eb;
1225 btrfs_release_path(path);
1226 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1234 eb = path->nodes[0];
1235 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
1236 ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
1237 while (ref_ptr < ref_end) {
1242 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1243 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1246 parent_id = key->offset;
1247 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1253 if (key->type == BTRFS_INODE_EXTREF_KEY)
1254 ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
1258 ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
1264 btrfs_release_path(path);
1265 dir = read_one_inode(root, parent_id);
1271 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
1272 inode, name, namelen);
1282 if (key->type == BTRFS_INODE_EXTREF_KEY)
1283 ref_ptr += sizeof(struct btrfs_inode_extref);
1285 ref_ptr += sizeof(struct btrfs_inode_ref);
1289 btrfs_release_path(path);
1293 static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
1294 const u8 ref_type, const char *name,
1297 struct btrfs_key key;
1298 struct btrfs_path *path;
1299 const u64 parent_id = btrfs_ino(BTRFS_I(dir));
1302 path = btrfs_alloc_path();
1306 key.objectid = btrfs_ino(BTRFS_I(inode));
1307 key.type = ref_type;
1308 if (key.type == BTRFS_INODE_REF_KEY)
1309 key.offset = parent_id;
1311 key.offset = btrfs_extref_hash(parent_id, name, namelen);
1313 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
1320 if (key.type == BTRFS_INODE_EXTREF_KEY)
1321 ret = btrfs_find_name_in_ext_backref(path->nodes[0],
1322 path->slots[0], parent_id,
1323 name, namelen, NULL);
1325 ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
1326 name, namelen, NULL);
1329 btrfs_free_path(path);
1333 static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1334 struct inode *dir, struct inode *inode, const char *name,
1335 int namelen, u64 ref_index)
1337 struct btrfs_dir_item *dir_item;
1338 struct btrfs_key key;
1339 struct btrfs_path *path;
1340 struct inode *other_inode = NULL;
1343 path = btrfs_alloc_path();
1347 dir_item = btrfs_lookup_dir_item(NULL, root, path,
1348 btrfs_ino(BTRFS_I(dir)),
1351 btrfs_release_path(path);
1353 } else if (IS_ERR(dir_item)) {
1354 ret = PTR_ERR(dir_item);
1359 * Our inode's dentry collides with the dentry of another inode which is
1360 * in the log but not yet processed since it has a higher inode number.
1361 * So delete that other dentry.
1363 btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
1364 btrfs_release_path(path);
1365 other_inode = read_one_inode(root, key.objectid);
1370 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
1375 * If we dropped the link count to 0, bump it so that later the iput()
1376 * on the inode will not free it. We will fixup the link count later.
1378 if (other_inode->i_nlink == 0)
1379 inc_nlink(other_inode);
1381 ret = btrfs_run_delayed_items(trans);
1385 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
1386 name, namelen, 0, ref_index);
1389 btrfs_free_path(path);
1395 * replay one inode back reference item found in the log tree.
1396 * eb, slot and key refer to the buffer and key found in the log tree.
1397 * root is the destination we are replaying into, and path is for temp
1398 * use by this function. (it should be released on return).
1400 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1401 struct btrfs_root *root,
1402 struct btrfs_root *log,
1403 struct btrfs_path *path,
1404 struct extent_buffer *eb, int slot,
1405 struct btrfs_key *key)
1407 struct inode *dir = NULL;
1408 struct inode *inode = NULL;
1409 unsigned long ref_ptr;
1410 unsigned long ref_end;
1414 int search_done = 0;
1415 int log_ref_ver = 0;
1416 u64 parent_objectid;
1419 int ref_struct_size;
1421 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1422 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1424 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1425 struct btrfs_inode_extref *r;
1427 ref_struct_size = sizeof(struct btrfs_inode_extref);
1429 r = (struct btrfs_inode_extref *)ref_ptr;
1430 parent_objectid = btrfs_inode_extref_parent(eb, r);
1432 ref_struct_size = sizeof(struct btrfs_inode_ref);
1433 parent_objectid = key->offset;
1435 inode_objectid = key->objectid;
1438 * it is possible that we didn't log all the parent directories
1439 * for a given inode. If we don't find the dir, just don't
1440 * copy the back ref in. The link count fixup code will take
1443 dir = read_one_inode(root, parent_objectid);
1449 inode = read_one_inode(root, inode_objectid);
1455 while (ref_ptr < ref_end) {
1457 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1458 &ref_index, &parent_objectid);
1460 * parent object can change from one array
1464 dir = read_one_inode(root, parent_objectid);
1470 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1476 /* if we already have a perfect match, we're done */
1477 if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1478 btrfs_ino(BTRFS_I(inode)), ref_index,
1481 * look for a conflicting back reference in the
1482 * metadata. if we find one we have to unlink that name
1483 * of the file before we add our new link. Later on, we
1484 * overwrite any existing back reference, and we don't
1485 * want to create dangling pointers in the directory.
1489 ret = __add_inode_ref(trans, root, path, log,
1494 ref_index, name, namelen,
1504 * If a reference item already exists for this inode
1505 * with the same parent and name, but different index,
1506 * drop it and the corresponding directory index entries
1507 * from the parent before adding the new reference item
1508 * and dir index entries, otherwise we would fail with
1509 * -EEXIST returned from btrfs_add_link() below.
1511 ret = btrfs_inode_ref_exists(inode, dir, key->type,
1514 ret = btrfs_unlink_inode(trans, root,
1519 * If we dropped the link count to 0, bump it so
1520 * that later the iput() on the inode will not
1521 * free it. We will fixup the link count later.
1523 if (!ret && inode->i_nlink == 0)
1529 /* insert our name */
1530 ret = add_link(trans, root, dir, inode, name, namelen,
1535 btrfs_update_inode(trans, root, inode);
1538 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1548 * Before we overwrite the inode reference item in the subvolume tree
1549 * with the item from the log tree, we must unlink all names from the
1550 * parent directory that are in the subvolume's tree inode reference
1551 * item, otherwise we end up with an inconsistent subvolume tree where
1552 * dir index entries exist for a name but there is no inode reference
1553 * item with the same name.
1555 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
1560 /* finally write the back reference in the inode */
1561 ret = overwrite_item(trans, root, path, eb, slot, key);
1563 btrfs_release_path(path);
1570 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1571 struct btrfs_root *root, u64 ino)
1575 ret = btrfs_insert_orphan_item(trans, root, ino);
1582 static int count_inode_extrefs(struct btrfs_root *root,
1583 struct btrfs_inode *inode, struct btrfs_path *path)
1587 unsigned int nlink = 0;
1590 u64 inode_objectid = btrfs_ino(inode);
1593 struct btrfs_inode_extref *extref;
1594 struct extent_buffer *leaf;
1597 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1602 leaf = path->nodes[0];
1603 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1604 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1607 while (cur_offset < item_size) {
1608 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1609 name_len = btrfs_inode_extref_name_len(leaf, extref);
1613 cur_offset += name_len + sizeof(*extref);
1617 btrfs_release_path(path);
1619 btrfs_release_path(path);
1621 if (ret < 0 && ret != -ENOENT)
1626 static int count_inode_refs(struct btrfs_root *root,
1627 struct btrfs_inode *inode, struct btrfs_path *path)
1630 struct btrfs_key key;
1631 unsigned int nlink = 0;
1633 unsigned long ptr_end;
1635 u64 ino = btrfs_ino(inode);
1638 key.type = BTRFS_INODE_REF_KEY;
1639 key.offset = (u64)-1;
1642 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1646 if (path->slots[0] == 0)
1651 btrfs_item_key_to_cpu(path->nodes[0], &key,
1653 if (key.objectid != ino ||
1654 key.type != BTRFS_INODE_REF_KEY)
1656 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1657 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1659 while (ptr < ptr_end) {
1660 struct btrfs_inode_ref *ref;
1662 ref = (struct btrfs_inode_ref *)ptr;
1663 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1665 ptr = (unsigned long)(ref + 1) + name_len;
1669 if (key.offset == 0)
1671 if (path->slots[0] > 0) {
1676 btrfs_release_path(path);
1678 btrfs_release_path(path);
1684 * There are a few corner cases where the link count of the file can't
1685 * be properly maintained during replay. So, instead of adding
1686 * lots of complexity to the log code, we just scan the backrefs
1687 * for any file that has been through replay.
1689 * The scan will update the link count on the inode to reflect the
1690 * number of back refs found. If it goes down to zero, the iput
1691 * will free the inode.
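 *
 * In effect (a sketch of the function below, not additional behaviour):
 *
 *	nlink  = count_inode_refs(root, BTRFS_I(inode), path);
 *	nlink += count_inode_extrefs(root, BTRFS_I(inode), path);
 *	if (nlink != inode->i_nlink)
 *		set_nlink(inode, nlink);
 *	if (inode->i_nlink == 0)
 *		redo the deletes (replay_dir_deletes() for directories)
 *		and insert an orphan item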
1693 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1694 struct btrfs_root *root,
1695 struct inode *inode)
1697 struct btrfs_path *path;
1700 u64 ino = btrfs_ino(BTRFS_I(inode));
1702 path = btrfs_alloc_path();
1706 ret = count_inode_refs(root, BTRFS_I(inode), path);
1712 ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1720 if (nlink != inode->i_nlink) {
1721 set_nlink(inode, nlink);
1722 btrfs_update_inode(trans, root, inode);
1724 BTRFS_I(inode)->index_cnt = (u64)-1;
1726 if (inode->i_nlink == 0) {
1727 if (S_ISDIR(inode->i_mode)) {
1728 ret = replay_dir_deletes(trans, root, NULL, path,
1733 ret = insert_orphan_item(trans, root, ino);
1737 btrfs_free_path(path);
1741 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1742 struct btrfs_root *root,
1743 struct btrfs_path *path)
1746 struct btrfs_key key;
1747 struct inode *inode;
1749 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1750 key.type = BTRFS_ORPHAN_ITEM_KEY;
1751 key.offset = (u64)-1;
1753 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1758 if (path->slots[0] == 0)
1763 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1764 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1765 key.type != BTRFS_ORPHAN_ITEM_KEY)
1768 ret = btrfs_del_item(trans, root, path);
1772 btrfs_release_path(path);
1773 inode = read_one_inode(root, key.offset);
1777 ret = fixup_inode_link_count(trans, root, inode);
1783 * fixup on a directory may create new entries,
1784 * make sure we always look for the highest possible
1787 key.offset = (u64)-1;
1791 btrfs_release_path(path);
1797 * record a given inode in the fixup dir so we can check its link
1798 * count when replay is done. The link count is incremented here
1799 * so the inode won't go away until we check it
1801 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1802 struct btrfs_root *root,
1803 struct btrfs_path *path,
1806 struct btrfs_key key;
1808 struct inode *inode;
1810 inode = read_one_inode(root, objectid);
1814 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1815 key.type = BTRFS_ORPHAN_ITEM_KEY;
1816 key.offset = objectid;
1818 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1820 btrfs_release_path(path);
1822 if (!inode->i_nlink)
1823 set_nlink(inode, 1);
1826 ret = btrfs_update_inode(trans, root, inode);
1827 } else if (ret == -EEXIST) {
1830 BUG(); /* Logic Error */
1838 * when replaying the log for a directory, we only insert names
1839 * for inodes that actually exist. This means an fsync on a directory
1840 * does not implicitly fsync all the new files in it
1842 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1843 struct btrfs_root *root,
1844 u64 dirid, u64 index,
1845 char *name, int name_len,
1846 struct btrfs_key *location)
1848 struct inode *inode;
1852 inode = read_one_inode(root, location->objectid);
1856 dir = read_one_inode(root, dirid);
1862 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1863 name_len, 1, index);
1865 /* FIXME, put inode into FIXUP list */
1873 * Return true if an inode reference exists in the log for the given name,
1874 * inode and parent inode.
1876 static bool name_in_log_ref(struct btrfs_root *log_root,
1877 const char *name, const int name_len,
1878 const u64 dirid, const u64 ino)
1880 struct btrfs_key search_key;
1882 search_key.objectid = ino;
1883 search_key.type = BTRFS_INODE_REF_KEY;
1884 search_key.offset = dirid;
1885 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1888 search_key.type = BTRFS_INODE_EXTREF_KEY;
1889 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1890 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1897 * take a single entry in a log directory item and replay it into
1900 * if a conflicting item exists in the subdirectory already,
1901 * the inode it points to is unlinked and put into the link count
1904 * If a name from the log points to a file or directory that does
1905 * not exist in the FS, it is skipped. fsyncs on directories
1906 * do not force down inodes inside that directory, just changes to the
1907 * names or unlinks in a directory.
1909 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1910 * non-existing inode) and 1 if the name was replayed.
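 *
 * For example (an added illustration): a logged DIR_INDEX entry for "foo"
 * whose target inode also exists in the subvolume is inserted via
 * insert_one_name() and 1 is returned, while an entry whose target inode was
 * never copied (the file itself was not fsynced) is skipped and 0 is
 * returned.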
1912 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1913 struct btrfs_root *root,
1914 struct btrfs_path *path,
1915 struct extent_buffer *eb,
1916 struct btrfs_dir_item *di,
1917 struct btrfs_key *key)
1921 struct btrfs_dir_item *dst_di;
1922 struct btrfs_key found_key;
1923 struct btrfs_key log_key;
1928 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1929 bool name_added = false;
1931 dir = read_one_inode(root, key->objectid);
1935 name_len = btrfs_dir_name_len(eb, di);
1936 name = kmalloc(name_len, GFP_NOFS);
1942 log_type = btrfs_dir_type(eb, di);
1943 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1946 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1947 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1952 btrfs_release_path(path);
1954 if (key->type == BTRFS_DIR_ITEM_KEY) {
1955 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1957 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1958 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1967 if (IS_ERR_OR_NULL(dst_di)) {
1968 /* we need a sequence number to insert, so we only
1969 * do inserts for the BTRFS_DIR_INDEX_KEY types
1971 if (key->type != BTRFS_DIR_INDEX_KEY)
1976 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1977 /* the existing item matches the logged item */
1978 if (found_key.objectid == log_key.objectid &&
1979 found_key.type == log_key.type &&
1980 found_key.offset == log_key.offset &&
1981 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1982 update_size = false;
1987 * don't drop the conflicting directory entry if the inode
1988 * for the new entry doesn't exist
1993 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
1997 if (key->type == BTRFS_DIR_INDEX_KEY)
2000 btrfs_release_path(path);
2001 if (!ret && update_size) {
2002 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
2003 ret = btrfs_update_inode(trans, root, dir);
2007 if (!ret && name_added)
2012 if (name_in_log_ref(root->log_root, name, name_len,
2013 key->objectid, log_key.objectid)) {
2014 /* The dentry will be added later. */
2016 update_size = false;
2019 btrfs_release_path(path);
2020 ret = insert_one_name(trans, root, key->objectid, key->offset,
2021 name, name_len, &log_key);
2022 if (ret && ret != -ENOENT && ret != -EEXIST)
2026 update_size = false;
2032 * find all the names in a directory item and reconcile them into
2033 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
2034 * one name in a directory item, but the same code gets used for
2035 * both directory index types
2037 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2038 struct btrfs_root *root,
2039 struct btrfs_path *path,
2040 struct extent_buffer *eb, int slot,
2041 struct btrfs_key *key)
2044 u32 item_size = btrfs_item_size_nr(eb, slot);
2045 struct btrfs_dir_item *di;
2048 unsigned long ptr_end;
2049 struct btrfs_path *fixup_path = NULL;
2051 ptr = btrfs_item_ptr_offset(eb, slot);
2052 ptr_end = ptr + item_size;
2053 while (ptr < ptr_end) {
2054 di = (struct btrfs_dir_item *)ptr;
2055 name_len = btrfs_dir_name_len(eb, di);
2056 ret = replay_one_name(trans, root, path, eb, di, key);
2059 ptr = (unsigned long)(di + 1);
2063 * If this entry refers to a non-directory (directories can not
2064 * have a link count > 1) and it was added in the transaction
2065 * that was not committed, make sure we fix up the link count of
2066 * the inode the entry points to. Otherwise something like
2067 * the following would result in a directory pointing to an
2068 * inode with a wrong link count that does not account for this dir
2076 * ln testdir/bar testdir/bar_link
2077 * ln testdir/foo testdir/foo_link
2078 * xfs_io -c "fsync" testdir/bar
2082 * mount fs, log replay happens
2084 * File foo would remain with a link count of 1 when it has two
2085 * entries pointing to it in the directory testdir. This would
2086 * make it impossible to ever delete the parent directory as
2087 * it would result in stale dentries that can never be deleted.
2089 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2090 struct btrfs_key di_key;
2093 fixup_path = btrfs_alloc_path();
2100 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2101 ret = link_to_fixup_dir(trans, root, fixup_path,
2108 btrfs_free_path(fixup_path);
2113 * directory replay has two parts. There are the standard directory
2114 * items in the log copied from the subvolume, and range items
2115 * created in the log while the subvolume was logged.
2117 * The range items tell us which parts of the key space the log
2118 * is authoritative for. During replay, if a key in the subvolume
2119 * directory is in a logged range item, but not actually in the log,
2120 * that means it was deleted from the directory before the fsync
2121 * and should be removed.
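 *
 * For example (an added illustration with made up numbers): a dir log item
 * with key (dirid DIR_LOG_INDEX 3) and dir_log_end 10 says the log is
 * authoritative for dir index offsets 3 through 10 of that directory. A dir
 * index key at offset 7 that exists in the subvolume but has no matching
 * entry in the log is then removed via check_item_in_log().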
2123 static noinline int find_dir_range(struct btrfs_root *root,
2124 struct btrfs_path *path,
2125 u64 dirid, int key_type,
2126 u64 *start_ret, u64 *end_ret)
2128 struct btrfs_key key;
2130 struct btrfs_dir_log_item *item;
2134 if (*start_ret == (u64)-1)
2137 key.objectid = dirid;
2138 key.type = key_type;
2139 key.offset = *start_ret;
2141 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2145 if (path->slots[0] == 0)
2150 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2152 if (key.type != key_type || key.objectid != dirid) {
2156 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2157 struct btrfs_dir_log_item);
2158 found_end = btrfs_dir_log_end(path->nodes[0], item);
2160 if (*start_ret >= key.offset && *start_ret <= found_end) {
2162 *start_ret = key.offset;
2163 *end_ret = found_end;
2168 /* check the next slot in the tree to see if it is a valid item */
2169 nritems = btrfs_header_nritems(path->nodes[0]);
2171 if (path->slots[0] >= nritems) {
2172 ret = btrfs_next_leaf(root, path);
2177 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2179 if (key.type != key_type || key.objectid != dirid) {
2183 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2184 struct btrfs_dir_log_item);
2185 found_end = btrfs_dir_log_end(path->nodes[0], item);
2186 *start_ret = key.offset;
2187 *end_ret = found_end;
2190 btrfs_release_path(path);
2195 * this looks for a given directory item in the log. If the directory
2196 * item is not in the log, the item is removed and the inode it points
2199 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2200 struct btrfs_root *root,
2201 struct btrfs_root *log,
2202 struct btrfs_path *path,
2203 struct btrfs_path *log_path,
2205 struct btrfs_key *dir_key)
2208 struct extent_buffer *eb;
2211 struct btrfs_dir_item *di;
2212 struct btrfs_dir_item *log_di;
2215 unsigned long ptr_end;
2217 struct inode *inode;
2218 struct btrfs_key location;
2221 eb = path->nodes[0];
2222 slot = path->slots[0];
2223 item_size = btrfs_item_size_nr(eb, slot);
2224 ptr = btrfs_item_ptr_offset(eb, slot);
2225 ptr_end = ptr + item_size;
2226 while (ptr < ptr_end) {
2227 di = (struct btrfs_dir_item *)ptr;
2228 name_len = btrfs_dir_name_len(eb, di);
2229 name = kmalloc(name_len, GFP_NOFS);
2234 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2237 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2238 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2241 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2242 log_di = btrfs_lookup_dir_index_item(trans, log,
2248 if (!log_di || log_di == ERR_PTR(-ENOENT)) {
2249 btrfs_dir_item_key_to_cpu(eb, di, &location);
2250 btrfs_release_path(path);
2251 btrfs_release_path(log_path);
2252 inode = read_one_inode(root, location.objectid);
2258 ret = link_to_fixup_dir(trans, root,
2259 path, location.objectid);
2267 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
2268 BTRFS_I(inode), name, name_len);
2270 ret = btrfs_run_delayed_items(trans);
2276 /* there might still be more names under this key,
2277 * check and repeat if required
2279 ret = btrfs_search_slot(NULL, root, dir_key, path,
2285 } else if (IS_ERR(log_di)) {
2287 return PTR_ERR(log_di);
2289 btrfs_release_path(log_path);
2292 ptr = (unsigned long)(di + 1);
2297 btrfs_release_path(path);
2298 btrfs_release_path(log_path);
2302 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2303 struct btrfs_root *root,
2304 struct btrfs_root *log,
2305 struct btrfs_path *path,
2308 struct btrfs_key search_key;
2309 struct btrfs_path *log_path;
2314 log_path = btrfs_alloc_path();
2318 search_key.objectid = ino;
2319 search_key.type = BTRFS_XATTR_ITEM_KEY;
2320 search_key.offset = 0;
2322 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2326 nritems = btrfs_header_nritems(path->nodes[0]);
2327 for (i = path->slots[0]; i < nritems; i++) {
2328 struct btrfs_key key;
2329 struct btrfs_dir_item *di;
2330 struct btrfs_dir_item *log_di;
2334 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2335 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2340 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2341 total_size = btrfs_item_size_nr(path->nodes[0], i);
2343 while (cur < total_size) {
2344 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2345 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2346 u32 this_len = sizeof(*di) + name_len + data_len;
2349 name = kmalloc(name_len, GFP_NOFS);
2354 read_extent_buffer(path->nodes[0], name,
2355 (unsigned long)(di + 1), name_len);
2357 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2359 btrfs_release_path(log_path);
2361 /* Doesn't exist in log tree, so delete it. */
2362 btrfs_release_path(path);
2363 di = btrfs_lookup_xattr(trans, root, path, ino,
2364 name, name_len, -1);
2371 ret = btrfs_delete_one_dir_name(trans, root,
2375 btrfs_release_path(path);
2380 if (IS_ERR(log_di)) {
2381 ret = PTR_ERR(log_di);
2385 di = (struct btrfs_dir_item *)((char *)di + this_len);
2388 ret = btrfs_next_leaf(root, path);
2394 btrfs_free_path(log_path);
2395 btrfs_release_path(path);
2401 * deletion replay happens before we copy any new directory items
2402 * out of the log or out of backreferences from inodes. It
2403 * scans the log to find ranges of keys that the log is authoritative for,
2404 * and then scans the directory to find items in those ranges that are
2405 * not present in the log.
2407 * Anything we don't find in the log is unlinked and removed from the
2410 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2411 struct btrfs_root *root,
2412 struct btrfs_root *log,
2413 struct btrfs_path *path,
2414 u64 dirid, int del_all)
2418 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
2420 struct btrfs_key dir_key;
2421 struct btrfs_key found_key;
2422 struct btrfs_path *log_path;
2425 dir_key.objectid = dirid;
2426 dir_key.type = BTRFS_DIR_ITEM_KEY;
2427 log_path = btrfs_alloc_path();
2431 dir = read_one_inode(root, dirid);
2432 /* it isn't an error if the inode isn't there, that can happen
2433 * because we replay the deletes before we copy in the inode item
2437 btrfs_free_path(log_path);
2445 range_end = (u64)-1;
2447 ret = find_dir_range(log, path, dirid, key_type,
2448 &range_start, &range_end);
2453 dir_key.offset = range_start;
2456 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2461 nritems = btrfs_header_nritems(path->nodes[0]);
2462 if (path->slots[0] >= nritems) {
2463 ret = btrfs_next_leaf(root, path);
2469 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2471 if (found_key.objectid != dirid ||
2472 found_key.type != dir_key.type)
2475 if (found_key.offset > range_end)
2478 ret = check_item_in_log(trans, root, log, path,
2483 if (found_key.offset == (u64)-1)
2485 dir_key.offset = found_key.offset + 1;
2487 btrfs_release_path(path);
2488 if (range_end == (u64)-1)
2490 range_start = range_end + 1;
2495 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2496 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2497 dir_key.type = BTRFS_DIR_INDEX_KEY;
2498 btrfs_release_path(path);
2502 btrfs_release_path(path);
2503 btrfs_free_path(log_path);
2509 * the process_func used to replay items from the log tree. This
2510 * gets called in two different stages. The first stage just looks
2511 * for inodes and makes sure they are all copied into the subvolume.
2513 * The second stage copies all the other item types from the log into
2514 * the subvolume. The two stage approach is slower, but gets rid of
2515 * lots of complexity around inodes referencing other inodes that exist
2516 * only in the log (references come from either directory items or inode
2519 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2520 struct walk_control *wc, u64 gen, int level)
2523 struct btrfs_path *path;
2524 struct btrfs_root *root = wc->replay_dest;
2525 struct btrfs_key key;
2529 ret = btrfs_read_buffer(eb, gen, level, NULL);
2533 level = btrfs_header_level(eb);
2538 path = btrfs_alloc_path();
2542 nritems = btrfs_header_nritems(eb);
2543 for (i = 0; i < nritems; i++) {
2544 btrfs_item_key_to_cpu(eb, &key, i);
2546 /* inode keys are done during the first stage */
2547 if (key.type == BTRFS_INODE_ITEM_KEY &&
2548 wc->stage == LOG_WALK_REPLAY_INODES) {
2549 struct btrfs_inode_item *inode_item;
2552 inode_item = btrfs_item_ptr(eb, i,
2553 struct btrfs_inode_item);
2555 * If we have a tmpfile (O_TMPFILE) that got fsync'ed
2556 * and never got linked before the fsync, skip it, as
2557 * replaying it is pointless since it would be deleted
2558 * later. We skip logging tmpfiles, but it's always
2559 * possible we are replaying a log created with a kernel
2560 * that used to log tmpfiles.
2562 if (btrfs_inode_nlink(eb, inode_item) == 0) {
2563 wc->ignore_cur_inode = true;
2566 wc->ignore_cur_inode = false;
2568 ret = replay_xattr_deletes(wc->trans, root, log,
2569 path, key.objectid);
2572 mode = btrfs_inode_mode(eb, inode_item);
2573 if (S_ISDIR(mode)) {
2574 ret = replay_dir_deletes(wc->trans,
2575 root, log, path, key.objectid, 0);
2579 ret = overwrite_item(wc->trans, root, path,
2585 * Before replaying extents, truncate the inode to its
2586 * size. We need to do it now and not after log replay
2587 * because before an fsync we can have prealloc extents
2588 * added beyond the inode's i_size. If we did it after,
2589 * through orphan cleanup for example, we would drop
2590 * those prealloc extents just after replaying them.
2592 if (S_ISREG(mode)) {
2593 struct inode *inode;
2596 inode = read_one_inode(root, key.objectid);
2601 from = ALIGN(i_size_read(inode),
2602 root->fs_info->sectorsize);
2603 ret = btrfs_drop_extents(wc->trans, root, inode,
2606 /* Update the inode's nbytes. */
2607 ret = btrfs_update_inode(wc->trans,
2615 ret = link_to_fixup_dir(wc->trans, root,
2616 path, key.objectid);
2621 if (wc->ignore_cur_inode)
2624 if (key.type == BTRFS_DIR_INDEX_KEY &&
2625 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2626 ret = replay_one_dir_item(wc->trans, root, path,
2632 if (wc->stage < LOG_WALK_REPLAY_ALL)
2635 /* these keys are simply copied */
2636 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2637 ret = overwrite_item(wc->trans, root, path,
2641 } else if (key.type == BTRFS_INODE_REF_KEY ||
2642 key.type == BTRFS_INODE_EXTREF_KEY) {
2643 ret = add_inode_ref(wc->trans, root, log, path,
2645 if (ret && ret != -ENOENT)
2648 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2649 ret = replay_one_extent(wc->trans, root, path,
2653 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2654 ret = replay_one_dir_item(wc->trans, root, path,
2660 btrfs_free_path(path);
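/*
 * walk down the log tree from the current path position, invoking
 * wc->process_func on each child block found. Depending on the
 * walk_control, processed blocks may also be cleaned and their
 * reserved extents freed or pinned.
 */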
2664 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2665 struct btrfs_root *root,
2666 struct btrfs_path *path, int *level,
2667 struct walk_control *wc)
2669 struct btrfs_fs_info *fs_info = root->fs_info;
2673 struct extent_buffer *next;
2674 struct extent_buffer *cur;
2675 struct extent_buffer *parent;
2679 WARN_ON(*level < 0);
2680 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2682 while (*level > 0) {
2683 struct btrfs_key first_key;
2685 WARN_ON(*level < 0);
2686 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2687 cur = path->nodes[*level];
2689 WARN_ON(btrfs_header_level(cur) != *level);
2691 if (path->slots[*level] >=
2692 btrfs_header_nritems(cur))
2695 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2696 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2697 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2698 blocksize = fs_info->nodesize;
2700 parent = path->nodes[*level];
2701 root_owner = btrfs_header_owner(parent);
2703 next = btrfs_find_create_tree_block(fs_info, bytenr);
2705 return PTR_ERR(next);
2708 ret = wc->process_func(root, next, wc, ptr_gen,
2711 free_extent_buffer(next);
2715 path->slots[*level]++;
2717 ret = btrfs_read_buffer(next, ptr_gen,
2718 *level - 1, &first_key);
2720 free_extent_buffer(next);
2725 btrfs_tree_lock(next);
2726 btrfs_set_lock_blocking_write(next);
2727 clean_tree_block(fs_info, next);
2728 btrfs_wait_tree_block_writeback(next);
2729 btrfs_tree_unlock(next);
2731 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2732 clear_extent_buffer_dirty(next);
2735 WARN_ON(root_owner !=
2736 BTRFS_TREE_LOG_OBJECTID);
2737 ret = btrfs_free_and_pin_reserved_extent(
2741 free_extent_buffer(next);
2745 free_extent_buffer(next);
2748 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2750 free_extent_buffer(next);
2754 WARN_ON(*level <= 0);
2755 if (path->nodes[*level-1])
2756 free_extent_buffer(path->nodes[*level-1]);
2757 path->nodes[*level-1] = next;
2758 *level = btrfs_header_level(next);
2759 path->slots[*level] = 0;
2762 WARN_ON(*level < 0);
2763 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2765 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
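/*
 * walk back up the log tree after walk_down_log_tree has exhausted a
 * subtree: process each node we are leaving with wc->process_func and,
 * when the walk_control asks for it, clean the block and free/pin its
 * reserved extent before dropping our reference to it.
 */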
2771 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2772 struct btrfs_root *root,
2773 struct btrfs_path *path, int *level,
2774 struct walk_control *wc)
2776 struct btrfs_fs_info *fs_info = root->fs_info;
2782 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2783 slot = path->slots[i];
2784 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2787 WARN_ON(*level == 0);
2790 struct extent_buffer *parent;
2791 if (path->nodes[*level] == root->node)
2792 parent = path->nodes[*level];
2794 parent = path->nodes[*level + 1];
2796 root_owner = btrfs_header_owner(parent);
2797 ret = wc->process_func(root, path->nodes[*level], wc,
2798 btrfs_header_generation(path->nodes[*level]),
2804 struct extent_buffer *next;
2806 next = path->nodes[*level];
2809 btrfs_tree_lock(next);
2810 btrfs_set_lock_blocking_write(next);
2811 clean_tree_block(fs_info, next);
2812 btrfs_wait_tree_block_writeback(next);
2813 btrfs_tree_unlock(next);
2815 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2816 clear_extent_buffer_dirty(next);
2819 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2820 ret = btrfs_free_and_pin_reserved_extent(
2822 path->nodes[*level]->start,
2823 path->nodes[*level]->len);
2827 free_extent_buffer(path->nodes[*level]);
2828 path->nodes[*level] = NULL;
2836 * drop the reference count on the tree rooted at 'log'. This traverses
2837 * the tree freeing any blocks that have a ref count of zero after being decremented.
2840 static int walk_log_tree(struct btrfs_trans_handle *trans,
2841 struct btrfs_root *log, struct walk_control *wc)
2843 struct btrfs_fs_info *fs_info = log->fs_info;
2847 struct btrfs_path *path;
2850 path = btrfs_alloc_path();
2854 level = btrfs_header_level(log->node);
2856 path->nodes[level] = log->node;
2857 extent_buffer_get(log->node);
2858 path->slots[level] = 0;
2861 wret = walk_down_log_tree(trans, log, path, &level, wc);
2869 wret = walk_up_log_tree(trans, log, path, &level, wc);
2878 /* was the root node processed? if not, catch it here */
2879 if (path->nodes[orig_level]) {
2880 ret = wc->process_func(log, path->nodes[orig_level], wc,
2881 btrfs_header_generation(path->nodes[orig_level]),
2886 struct extent_buffer *next;
2888 next = path->nodes[orig_level];
2891 btrfs_tree_lock(next);
2892 btrfs_set_lock_blocking_write(next);
2893 clean_tree_block(fs_info, next);
2894 btrfs_wait_tree_block_writeback(next);
2895 btrfs_tree_unlock(next);
2897 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2898 clear_extent_buffer_dirty(next);
2901 WARN_ON(log->root_key.objectid !=
2902 BTRFS_TREE_LOG_OBJECTID);
2903 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2904 next->start, next->len);
2911 btrfs_free_path(path);
2916 * helper function to update the item for a given subvolumes log root
2917 * in the tree of log roots
2919 static int update_log_root(struct btrfs_trans_handle *trans,
2920 struct btrfs_root *log)
2922 struct btrfs_fs_info *fs_info = log->fs_info;
2925 if (log->log_transid == 1) {
2926 /* insert root item on the first sync */
2927 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2928 &log->root_key, &log->root_item);
2930 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2931 &log->root_key, &log->root_item);
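/*
 * wait for a log commit of the given transid on this root to finish.
 * Only two log transactions may be pending at a time, so the transid
 * alone identifies which commit slot to wait on.
 */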
2936 static void wait_log_commit(struct btrfs_root *root, int transid)
2939 int index = transid % 2;
2942 * we only allow two pending log transactions at a time,
2943 * so we know that if ours is more than 2 older than the
2944 * current transaction, we're done
2947 prepare_to_wait(&root->log_commit_wait[index],
2948 &wait, TASK_UNINTERRUPTIBLE);
2950 if (!(root->log_transid_committed < transid &&
2951 atomic_read(&root->log_commit[index])))
2954 mutex_unlock(&root->log_mutex);
2956 mutex_lock(&root->log_mutex);
2958 finish_wait(&root->log_commit_wait[index], &wait);
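/*
 * wait until there are no more tasks adding items to this root's log
 * (root->log_writers drops to zero), dropping and retaking log_mutex
 * while sleeping.
 */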
2961 static void wait_for_writer(struct btrfs_root *root)
2966 prepare_to_wait(&root->log_writer_wait, &wait,
2967 TASK_UNINTERRUPTIBLE);
2968 if (!atomic_read(&root->log_writers))
2971 mutex_unlock(&root->log_mutex);
2973 mutex_lock(&root->log_mutex);
2975 finish_wait(&root->log_writer_wait, &wait);
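/*
 * remove a log context from its root's list of pending contexts,
 * taking log_mutex to serialize against the log commit code.
 */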
2978 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2979 struct btrfs_log_ctx *ctx)
2984 mutex_lock(&root->log_mutex);
2985 list_del_init(&ctx->list);
2986 mutex_unlock(&root->log_mutex);
2990 * Invoked with the log mutex held, or be sure there is no other task which
2991 * can access the list.
2993 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2994 int index, int error)
2996 struct btrfs_log_ctx *ctx;
2997 struct btrfs_log_ctx *safe;
2999 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3000 list_del_init(&ctx->list);
3001 ctx->log_ret = error;
3004 INIT_LIST_HEAD(&root->log_ctxs[index]);
3008 * btrfs_sync_log sends a given tree log down to the disk and updates
3009 * the super blocks to record it. When this call is done, you know that
3010 * any inodes previously logged are safely on disk only if it returns 0.
3013 * Any other return value means you need to call btrfs_commit_transaction.
3014 * Some of the edge cases for fsyncing directories that have had unlinks
3015 * or renames done in the past mean that sometimes the only safe
3016 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3017 * that has happened.
3019 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3020 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3026 struct btrfs_fs_info *fs_info = root->fs_info;
3027 struct btrfs_root *log = root->log_root;
3028 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3029 int log_transid = 0;
3030 struct btrfs_log_ctx root_log_ctx;
3031 struct blk_plug plug;
3033 mutex_lock(&root->log_mutex);
3034 log_transid = ctx->log_transid;
3035 if (root->log_transid_committed >= log_transid) {
3036 mutex_unlock(&root->log_mutex);
3037 return ctx->log_ret;
3040 index1 = log_transid % 2;
3041 if (atomic_read(&root->log_commit[index1])) {
3042 wait_log_commit(root, log_transid);
3043 mutex_unlock(&root->log_mutex);
3044 return ctx->log_ret;
3046 ASSERT(log_transid == root->log_transid);
3047 atomic_set(&root->log_commit[index1], 1);
3049 /* wait for previous tree log sync to complete */
3050 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3051 wait_log_commit(root, log_transid - 1);
3054 int batch = atomic_read(&root->log_batch);
3055 /* when we're on an ssd, just kick the log commit out */
3056 if (!btrfs_test_opt(fs_info, SSD) &&
3057 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3058 mutex_unlock(&root->log_mutex);
3059 schedule_timeout_uninterruptible(1);
3060 mutex_lock(&root->log_mutex);
3062 wait_for_writer(root);
3063 if (batch == atomic_read(&root->log_batch))
3067 /* bail out if we need to do a full commit */
3068 if (btrfs_need_log_full_commit(fs_info, trans)) {
3070 mutex_unlock(&root->log_mutex);
3074 if (log_transid % 2 == 0)
3075 mark = EXTENT_DIRTY;
3079 /* we start IO on all the marked extents here, but we don't actually
3080 * wait for them until later.
3082 blk_start_plug(&plug);
3083 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3085 blk_finish_plug(&plug);
3086 btrfs_abort_transaction(trans, ret);
3087 btrfs_set_log_full_commit(fs_info, trans);
3088 mutex_unlock(&root->log_mutex);
3092 btrfs_set_root_node(&log->root_item, log->node);
3094 root->log_transid++;
3095 log->log_transid = root->log_transid;
3096 root->log_start_pid = 0;
3098 * IO has been started, blocks of the log tree have the WRITTEN flag set
3099 * in their headers. New modifications of the log will be written to
3100 * new positions, so it's safe to allow log writers to go in.
3102 mutex_unlock(&root->log_mutex);
3104 btrfs_init_log_ctx(&root_log_ctx, NULL);
3106 mutex_lock(&log_root_tree->log_mutex);
3107 atomic_inc(&log_root_tree->log_batch);
3108 atomic_inc(&log_root_tree->log_writers);
3110 index2 = log_root_tree->log_transid % 2;
3111 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
3112 root_log_ctx.log_transid = log_root_tree->log_transid;
3114 mutex_unlock(&log_root_tree->log_mutex);
3116 ret = update_log_root(trans, log);
3118 mutex_lock(&log_root_tree->log_mutex);
3119 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
3120 /* atomic_dec_and_test implies a barrier */
3121 cond_wake_up_nomb(&log_root_tree->log_writer_wait);
3125 if (!list_empty(&root_log_ctx.list))
3126 list_del_init(&root_log_ctx.list);
3128 blk_finish_plug(&plug);
3129 btrfs_set_log_full_commit(fs_info, trans);
3131 if (ret != -ENOSPC) {
3132 btrfs_abort_transaction(trans, ret);
3133 mutex_unlock(&log_root_tree->log_mutex);
3136 btrfs_wait_tree_log_extents(log, mark);
3137 mutex_unlock(&log_root_tree->log_mutex);
3142 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3143 blk_finish_plug(&plug);
3144 list_del_init(&root_log_ctx.list);
3145 mutex_unlock(&log_root_tree->log_mutex);
3146 ret = root_log_ctx.log_ret;
3150 index2 = root_log_ctx.log_transid % 2;
3151 if (atomic_read(&log_root_tree->log_commit[index2])) {
3152 blk_finish_plug(&plug);
3153 ret = btrfs_wait_tree_log_extents(log, mark);
3154 wait_log_commit(log_root_tree,
3155 root_log_ctx.log_transid);
3156 mutex_unlock(&log_root_tree->log_mutex);
3158 ret = root_log_ctx.log_ret;
3161 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3162 atomic_set(&log_root_tree->log_commit[index2], 1);
3164 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3165 wait_log_commit(log_root_tree,
3166 root_log_ctx.log_transid - 1);
3169 wait_for_writer(log_root_tree);
3172 * now that we've moved on to the tree of log tree roots,
3173 * check the full commit flag again
3175 if (btrfs_need_log_full_commit(fs_info, trans)) {
3176 blk_finish_plug(&plug);
3177 btrfs_wait_tree_log_extents(log, mark);
3178 mutex_unlock(&log_root_tree->log_mutex);
3180 goto out_wake_log_root;
3183 ret = btrfs_write_marked_extents(fs_info,
3184 &log_root_tree->dirty_log_pages,
3185 EXTENT_DIRTY | EXTENT_NEW);
3186 blk_finish_plug(&plug);
3188 btrfs_set_log_full_commit(fs_info, trans);
3189 btrfs_abort_transaction(trans, ret);
3190 mutex_unlock(&log_root_tree->log_mutex);
3191 goto out_wake_log_root;
3193 ret = btrfs_wait_tree_log_extents(log, mark);
3195 ret = btrfs_wait_tree_log_extents(log_root_tree,
3196 EXTENT_NEW | EXTENT_DIRTY);
3198 btrfs_set_log_full_commit(fs_info, trans);
3199 mutex_unlock(&log_root_tree->log_mutex);
3200 goto out_wake_log_root;
3203 btrfs_set_super_log_root(fs_info->super_for_commit,
3204 log_root_tree->node->start);
3205 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3206 btrfs_header_level(log_root_tree->node));
3208 log_root_tree->log_transid++;
3209 mutex_unlock(&log_root_tree->log_mutex);
3212 * Nobody else is going to jump in and write the ctree
3213 * super here because the log_commit atomic below is protecting
3214 * us. We must be called with a transaction handle pinning
3215 * the running transaction open, so a full commit can't hop
3216 * in and cause problems either.
3218 ret = write_all_supers(fs_info, 1);
3220 btrfs_set_log_full_commit(fs_info, trans);
3221 btrfs_abort_transaction(trans, ret);
3222 goto out_wake_log_root;
3225 mutex_lock(&root->log_mutex);
3226 if (root->last_log_commit < log_transid)
3227 root->last_log_commit = log_transid;
3228 mutex_unlock(&root->log_mutex);
3231 mutex_lock(&log_root_tree->log_mutex);
3232 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3234 log_root_tree->log_transid_committed++;
3235 atomic_set(&log_root_tree->log_commit[index2], 0);
3236 mutex_unlock(&log_root_tree->log_mutex);
3239 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3240 * all the updates above are seen by the woken threads. It might not be
3241 * necessary, but proving that seems to be hard.
3243 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3245 mutex_lock(&root->log_mutex);
3246 btrfs_remove_all_log_ctxs(root, index1, ret);
3247 root->log_transid_committed++;
3248 atomic_set(&root->log_commit[index1], 0);
3249 mutex_unlock(&root->log_mutex);
3252 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3253 * all the updates above are seen by the woken threads. It might not be
3254 * necessary, but proving that seems to be hard.
3256 cond_wake_up(&root->log_commit_wait[index1]);
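/*
 * free a log tree: walk it to pin or release its blocks, clear any
 * dirty ranges left in dirty_log_pages and drop the reference on the
 * root node.
 */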
3260 static void free_log_tree(struct btrfs_trans_handle *trans,
3261 struct btrfs_root *log)
3264 struct walk_control wc = {
3266 .process_func = process_one_buffer
3269 ret = walk_log_tree(trans, log, &wc);
3272 btrfs_abort_transaction(trans, ret);
3274 btrfs_handle_fs_error(log->fs_info, ret, NULL);
3277 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1,
3278 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3279 free_extent_buffer(log->node);
3284 * free all the extents used by the tree log. This should be called
3285 * at commit time of the full transaction
3287 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3289 if (root->log_root) {
3290 free_log_tree(trans, root->log_root);
3291 root->log_root = NULL;
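/*
 * free the tree of log roots (the tree that tracks every per-subvolume
 * log root) at commit time of the full transaction.
 */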
3296 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3297 struct btrfs_fs_info *fs_info)
3299 if (fs_info->log_root_tree) {
3300 free_log_tree(trans, fs_info->log_root_tree);
3301 fs_info->log_root_tree = NULL;
3307 * If both a file and directory are logged, and unlinks or renames are
3308 * mixed in, we have a few interesting corners:
3310 * create file X in dir Y
3311 * link file X to X.link in dir Y
3313 * unlink file X but leave X.link
3316 * After a crash we would expect only X.link to exist. But file X
3317 * didn't get fsync'd again so the log has back refs for X and X.link.
3319 * We solve this by removing directory entries and inode backrefs from the
3320 * log when a file that was logged in the current transaction is
3321 * unlinked. Any later fsync will include the updated log entries, and
3322 * we'll be able to reconstruct the proper directory items from backrefs.
3324 * This optimization allows us to avoid relogging the entire inode
3325 * or the entire directory.
3327 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3328 struct btrfs_root *root,
3329 const char *name, int name_len,
3330 struct btrfs_inode *dir, u64 index)
3332 struct btrfs_root *log;
3333 struct btrfs_dir_item *di;
3334 struct btrfs_path *path;
3338 u64 dir_ino = btrfs_ino(dir);
3340 if (dir->logged_trans < trans->transid)
3343 ret = join_running_log_trans(root);
3347 mutex_lock(&dir->log_mutex);
3349 log = root->log_root;
3350 path = btrfs_alloc_path();
3356 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3357 name, name_len, -1);
3363 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3364 bytes_del += name_len;
3370 btrfs_release_path(path);
3371 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3372 index, name, name_len, -1);
3378 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3379 bytes_del += name_len;
3386 /* update the directory size in the log to reflect the names we have removed */
3390 struct btrfs_key key;
3392 key.objectid = dir_ino;
3394 key.type = BTRFS_INODE_ITEM_KEY;
3395 btrfs_release_path(path);
3397 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3403 struct btrfs_inode_item *item;
3406 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3407 struct btrfs_inode_item);
3408 i_size = btrfs_inode_size(path->nodes[0], item);
3409 if (i_size > bytes_del)
3410 i_size -= bytes_del;
3413 btrfs_set_inode_size(path->nodes[0], item, i_size);
3414 btrfs_mark_buffer_dirty(path->nodes[0]);
3417 btrfs_release_path(path);
3420 btrfs_free_path(path);
3422 mutex_unlock(&dir->log_mutex);
3423 if (ret == -ENOSPC) {
3424 btrfs_set_log_full_commit(root->fs_info, trans);
3427 btrfs_abort_transaction(trans, ret);
3429 btrfs_end_log_trans(root);
3434 /* see comments for btrfs_del_dir_entries_in_log */
3435 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3436 struct btrfs_root *root,
3437 const char *name, int name_len,
3438 struct btrfs_inode *inode, u64 dirid)
3440 struct btrfs_fs_info *fs_info = root->fs_info;
3441 struct btrfs_root *log;
3445 if (inode->logged_trans < trans->transid)
3448 ret = join_running_log_trans(root);
3451 log = root->log_root;
3452 mutex_lock(&inode->log_mutex);
3454 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3456 mutex_unlock(&inode->log_mutex);
3457 if (ret == -ENOSPC) {
3458 btrfs_set_log_full_commit(fs_info, trans);
3460 } else if (ret < 0 && ret != -ENOENT)
3461 btrfs_abort_transaction(trans, ret);
3462 btrfs_end_log_trans(root);
3468 * creates a range item in the log for 'dirid'. first_offset and
3469 * last_offset tell us which parts of the key space the log should
3470 * be considered authoritative for.
3472 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3473 struct btrfs_root *log,
3474 struct btrfs_path *path,
3475 int key_type, u64 dirid,
3476 u64 first_offset, u64 last_offset)
3479 struct btrfs_key key;
3480 struct btrfs_dir_log_item *item;
3482 key.objectid = dirid;
3483 key.offset = first_offset;
3484 if (key_type == BTRFS_DIR_ITEM_KEY)
3485 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3487 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3488 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3492 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3493 struct btrfs_dir_log_item);
3494 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3495 btrfs_mark_buffer_dirty(path->nodes[0]);
3496 btrfs_release_path(path);
3501 * log all the items included in the current transaction for a given
3502 * directory. This also creates the range items in the log tree required
3503 * to replay anything deleted before the fsync
3505 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3506 struct btrfs_root *root, struct btrfs_inode *inode,
3507 struct btrfs_path *path,
3508 struct btrfs_path *dst_path, int key_type,
3509 struct btrfs_log_ctx *ctx,
3510 u64 min_offset, u64 *last_offset_ret)
3512 struct btrfs_key min_key;
3513 struct btrfs_root *log = root->log_root;
3514 struct extent_buffer *src;
3519 u64 first_offset = min_offset;
3520 u64 last_offset = (u64)-1;
3521 u64 ino = btrfs_ino(inode);
3523 log = root->log_root;
3525 min_key.objectid = ino;
3526 min_key.type = key_type;
3527 min_key.offset = min_offset;
3529 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3532 * we didn't find anything from this transaction, see if there
3533 * is anything at all
3535 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3536 min_key.objectid = ino;
3537 min_key.type = key_type;
3538 min_key.offset = (u64)-1;
3539 btrfs_release_path(path);
3540 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3542 btrfs_release_path(path);
3545 ret = btrfs_previous_item(root, path, ino, key_type);
3547 /* if ret == 0 there are items for this type,
3548 * create a range to tell us the last key of this type.
3549 * otherwise, there are no items in this directory after
3550 * *min_offset, and we create a range to indicate that.
3553 struct btrfs_key tmp;
3554 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3556 if (key_type == tmp.type)
3557 first_offset = max(min_offset, tmp.offset) + 1;
3562 /* go backward to find any previous key */
3563 ret = btrfs_previous_item(root, path, ino, key_type);
3565 struct btrfs_key tmp;
3566 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3567 if (key_type == tmp.type) {
3568 first_offset = tmp.offset;
3569 ret = overwrite_item(trans, log, dst_path,
3570 path->nodes[0], path->slots[0],
3578 btrfs_release_path(path);
3580 /* find the first key from this transaction again */
3581 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3582 if (WARN_ON(ret != 0))
3586 * we have a block from this transaction, log every item in it
3587 * from our directory
3590 struct btrfs_key tmp;
3591 src = path->nodes[0];
3592 nritems = btrfs_header_nritems(src);
3593 for (i = path->slots[0]; i < nritems; i++) {
3594 struct btrfs_dir_item *di;
3596 btrfs_item_key_to_cpu(src, &min_key, i);
3598 if (min_key.objectid != ino || min_key.type != key_type)
3600 ret = overwrite_item(trans, log, dst_path, src, i,
3608 * We must make sure that when we log a directory entry,
3609 * the corresponding inode, after log replay, has a
3610 * matching link count. For example:
3616 * xfs_io -c "fsync" mydir
3618 * <mount fs and log replay>
3620 * Would result in an fsync log that, when replayed, would leave our
3621 * file inode with a link count of 1, but with two directory
3622 * entries pointing to the same inode.
3623 * After removing one of the names, it would not be
3624 * possible to remove the other name, which always resulted
3625 * in stale file handle errors, and it would not
3626 * be possible to rmdir the parent directory, since
3627 * its i_size could never decrement to the value
3628 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3630 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3631 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3633 (btrfs_dir_transid(src, di) == trans->transid ||
3634 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3635 tmp.type != BTRFS_ROOT_ITEM_KEY)
3636 ctx->log_new_dentries = true;
3638 path->slots[0] = nritems;
3641 * look ahead to the next item and see if it is also
3642 * from this directory and from this transaction
3644 ret = btrfs_next_leaf(root, path);
3647 last_offset = (u64)-1;
3652 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3653 if (tmp.objectid != ino || tmp.type != key_type) {
3654 last_offset = (u64)-1;
3657 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3658 ret = overwrite_item(trans, log, dst_path,
3659 path->nodes[0], path->slots[0],
3664 last_offset = tmp.offset;
3669 btrfs_release_path(path);
3670 btrfs_release_path(dst_path);
3673 *last_offset_ret = last_offset;
3675 * insert the log range keys to indicate where the log is valid
3678 ret = insert_dir_log_key(trans, log, path, key_type,
3679 ino, first_offset, last_offset);
3687 * logging directories is very similar to logging inodes. We find all the items
3688 * from the current transaction and write them to the log.
3690 * The recovery code scans the directory in the subvolume, and if it finds a
3691 * key in the range logged that is not present in the log tree, then it means
3692 * that dir entry was unlinked during the transaction.
3694 * In order for that scan to work, we must include one key smaller than
3695 * the smallest logged by this transaction and one key larger than the largest
3696 * key logged by this transaction.
3698 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3699 struct btrfs_root *root, struct btrfs_inode *inode,
3700 struct btrfs_path *path,
3701 struct btrfs_path *dst_path,
3702 struct btrfs_log_ctx *ctx)
3707 int key_type = BTRFS_DIR_ITEM_KEY;
3713 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3714 ctx, min_key, &max_key);
3717 if (max_key == (u64)-1)
3719 min_key = max_key + 1;
3722 if (key_type == BTRFS_DIR_ITEM_KEY) {
3723 key_type = BTRFS_DIR_INDEX_KEY;
3730 * a helper function to drop items from the log before we relog an
3731 * inode. max_key_type indicates the highest item type to remove.
3732 * This cannot be run for file data extents because it does not
3733 * free the extents they point to.
3735 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3736 struct btrfs_root *log,
3737 struct btrfs_path *path,
3738 u64 objectid, int max_key_type)
3741 struct btrfs_key key;
3742 struct btrfs_key found_key;
3745 key.objectid = objectid;
3746 key.type = max_key_type;
3747 key.offset = (u64)-1;
3750 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3751 BUG_ON(ret == 0); /* Logic error */
3755 if (path->slots[0] == 0)
3759 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3762 if (found_key.objectid != objectid)
3765 found_key.offset = 0;
3767 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3770 ret = btrfs_del_items(trans, log, path, start_slot,
3771 path->slots[0] - start_slot + 1);
3773 * If start slot isn't 0 then we don't need to re-search, we've
3774 * found the last guy with the objectid in this tree.
3776 if (ret || start_slot != 0)
3778 btrfs_release_path(path);
3780 btrfs_release_path(path);
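/*
 * fill an inode item in the log tree from the in-memory inode. When
 * log_inode_only is set, the generation is zeroed and the size forced
 * to logged_isize so replay knows this entry only records that the
 * inode exists.
 */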
3786 static void fill_inode_item(struct btrfs_trans_handle *trans,
3787 struct extent_buffer *leaf,
3788 struct btrfs_inode_item *item,
3789 struct inode *inode, int log_inode_only,
3792 struct btrfs_map_token token;
3794 btrfs_init_map_token(&token);
3796 if (log_inode_only) {
3797 /* set the generation to zero so the recovery code
3798 * can tell the difference between logging
3799 * just to say 'this inode exists' and logging
3800 * to say 'update this inode with these values'
3802 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3803 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3805 btrfs_set_token_inode_generation(leaf, item,
3806 BTRFS_I(inode)->generation,
3808 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3811 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3812 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3813 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3814 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3816 btrfs_set_token_timespec_sec(leaf, &item->atime,
3817 inode->i_atime.tv_sec, &token);
3818 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3819 inode->i_atime.tv_nsec, &token);
3821 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3822 inode->i_mtime.tv_sec, &token);
3823 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3824 inode->i_mtime.tv_nsec, &token);
3826 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3827 inode->i_ctime.tv_sec, &token);
3828 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3829 inode->i_ctime.tv_nsec, &token);
3831 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3834 btrfs_set_token_inode_sequence(leaf, item,
3835 inode_peek_iversion(inode), &token);
3836 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3837 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3838 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3839 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
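/*
 * insert the inode item for this inode into the log tree, overwriting
 * the existing copy if one was logged earlier in this transaction.
 */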
3842 static int log_inode_item(struct btrfs_trans_handle *trans,
3843 struct btrfs_root *log, struct btrfs_path *path,
3844 struct btrfs_inode *inode)
3846 struct btrfs_inode_item *inode_item;
3849 ret = btrfs_insert_empty_item(trans, log, path,
3850 &inode->location, sizeof(*inode_item));
3851 if (ret && ret != -EEXIST)
3853 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3854 struct btrfs_inode_item);
3855 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3857 btrfs_release_path(path);
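/*
 * copy nr items starting at start_slot from the source leaf into the
 * log tree, logging checksums for any file extents copied and inserting
 * explicit hole extents where gaps are found between them.
 */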
3861 static noinline int copy_items(struct btrfs_trans_handle *trans,
3862 struct btrfs_inode *inode,
3863 struct btrfs_path *dst_path,
3864 struct btrfs_path *src_path, u64 *last_extent,
3865 int start_slot, int nr, int inode_only,
3868 struct btrfs_fs_info *fs_info = trans->fs_info;
3869 unsigned long src_offset;
3870 unsigned long dst_offset;
3871 struct btrfs_root *log = inode->root->log_root;
3872 struct btrfs_file_extent_item *extent;
3873 struct btrfs_inode_item *inode_item;
3874 struct extent_buffer *src = src_path->nodes[0];
3875 struct btrfs_key first_key, last_key, key;
3877 struct btrfs_key *ins_keys;
3881 struct list_head ordered_sums;
3882 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3883 bool has_extents = false;
3884 bool need_find_last_extent = true;
3887 INIT_LIST_HEAD(&ordered_sums);
3889 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3890 nr * sizeof(u32), GFP_NOFS);
3894 first_key.objectid = (u64)-1;
3896 ins_sizes = (u32 *)ins_data;
3897 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3899 for (i = 0; i < nr; i++) {
3900 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3901 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3903 ret = btrfs_insert_empty_items(trans, log, dst_path,
3904 ins_keys, ins_sizes, nr);
3910 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3911 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3912 dst_path->slots[0]);
3914 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3917 last_key = ins_keys[i];
3919 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3920 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3922 struct btrfs_inode_item);
3923 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3925 inode_only == LOG_INODE_EXISTS,
3928 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3929 src_offset, ins_sizes[i]);
3933 * We set need_find_last_extent here for the case where we were
3934 * processing other items and then walked into the first extent in
3935 * the inode. If we don't hit an extent then nothing changes,
3936 * we'll do the last search the next time around.
3938 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3940 if (first_key.objectid == (u64)-1)
3941 first_key = ins_keys[i];
3943 need_find_last_extent = false;
3946 /* take a reference on file data extents so that truncates
3947 * or deletes of this inode don't have to relog the inode
3950 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3953 extent = btrfs_item_ptr(src, start_slot + i,
3954 struct btrfs_file_extent_item);
3956 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3959 found_type = btrfs_file_extent_type(src, extent);
3960 if (found_type == BTRFS_FILE_EXTENT_REG) {
3962 ds = btrfs_file_extent_disk_bytenr(src,
3964 /* ds == 0 is a hole */
3968 dl = btrfs_file_extent_disk_num_bytes(src,
3970 cs = btrfs_file_extent_offset(src, extent);
3971 cl = btrfs_file_extent_num_bytes(src,
3973 if (btrfs_file_extent_compression(src,
3979 ret = btrfs_lookup_csums_range(
3981 ds + cs, ds + cs + cl - 1,
3984 btrfs_release_path(dst_path);
3992 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3993 btrfs_release_path(dst_path);
3997 * we have to do this after the loop above to avoid changing the
3998 * log tree while trying to change the log tree.
4001 while (!list_empty(&ordered_sums)) {
4002 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4003 struct btrfs_ordered_sum,
4006 ret = btrfs_csum_file_blocks(trans, log, sums);
4007 list_del(&sums->list);
4014 if (need_find_last_extent && *last_extent == first_key.offset) {
4016 * We don't have any leaves between our current one and the one
4017 * we processed before that can have file extent items for our
4018 * inode (and have a generation number smaller than our current transaction id).
4021 need_find_last_extent = false;
4025 * Because we use btrfs_search_forward we could skip leaves that were
4026 * not modified and then assume *last_extent is valid when it really
4027 * isn't. So back up to the previous leaf and read the end of the last
4028 * extent before we go and fill in holes.
4030 if (need_find_last_extent) {
4033 ret = btrfs_prev_leaf(inode->root, src_path);
4038 if (src_path->slots[0])
4039 src_path->slots[0]--;
4040 src = src_path->nodes[0];
4041 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
4042 if (key.objectid != btrfs_ino(inode) ||
4043 key.type != BTRFS_EXTENT_DATA_KEY)
4045 extent = btrfs_item_ptr(src, src_path->slots[0],
4046 struct btrfs_file_extent_item);
4047 if (btrfs_file_extent_type(src, extent) ==
4048 BTRFS_FILE_EXTENT_INLINE) {
4049 len = btrfs_file_extent_ram_bytes(src, extent);
4050 *last_extent = ALIGN(key.offset + len,
4051 fs_info->sectorsize);
4053 len = btrfs_file_extent_num_bytes(src, extent);
4054 *last_extent = key.offset + len;
4058 /* So we did prev_leaf, now we need to move to the next leaf, but a few
4059 * things could have happened
4061 * 1) A merge could have happened, so we could currently be on a leaf
4062 * that holds what we were copying in the first place.
4063 * 2) A split could have happened, and now not all of the items we want
4064 * are on the same leaf.
4066 * So we need to adjust how we search for holes, we need to drop the
4067 * path and re-search for the first extent key we found, and then walk
4068 * forward until we hit the last one we copied.
4070 if (need_find_last_extent) {
4071 /* btrfs_prev_leaf could return 1 without releasing the path */
4072 btrfs_release_path(src_path);
4073 ret = btrfs_search_slot(NULL, inode->root, &first_key,
4078 src = src_path->nodes[0];
4079 i = src_path->slots[0];
4085 * Ok, now we need to go through and fill in any holes we may have,
4086 * to make sure that holes are punched for those areas in case they had
4087 * extents previously.
4093 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
4094 ret = btrfs_next_leaf(inode->root, src_path);
4098 src = src_path->nodes[0];
4100 need_find_last_extent = true;
4103 btrfs_item_key_to_cpu(src, &key, i);
4104 if (!btrfs_comp_cpu_keys(&key, &last_key))
4106 if (key.objectid != btrfs_ino(inode) ||
4107 key.type != BTRFS_EXTENT_DATA_KEY) {
4111 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
4112 if (btrfs_file_extent_type(src, extent) ==
4113 BTRFS_FILE_EXTENT_INLINE) {
4114 len = btrfs_file_extent_ram_bytes(src, extent);
4115 extent_end = ALIGN(key.offset + len,
4116 fs_info->sectorsize);
4118 len = btrfs_file_extent_num_bytes(src, extent);
4119 extent_end = key.offset + len;
4123 if (*last_extent == key.offset) {
4124 *last_extent = extent_end;
4127 offset = *last_extent;
4128 len = key.offset - *last_extent;
4129 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
4130 offset, 0, 0, len, 0, len, 0, 0, 0);
4133 *last_extent = extent_end;
4137 * Check if there is a hole between the last extent found in our leaf
4138 * and the first extent in the next leaf. If there is one, we need to
4139 * log an explicit hole so that at replay time we can punch the hole.
4142 key.objectid == btrfs_ino(inode) &&
4143 key.type == BTRFS_EXTENT_DATA_KEY &&
4144 i == btrfs_header_nritems(src_path->nodes[0])) {
4145 ret = btrfs_next_leaf(inode->root, src_path);
4146 need_find_last_extent = true;
4149 } else if (ret == 0) {
4150 btrfs_item_key_to_cpu(src_path->nodes[0], &key,
4151 src_path->slots[0]);
4152 if (key.objectid == btrfs_ino(inode) &&
4153 key.type == BTRFS_EXTENT_DATA_KEY &&
4154 *last_extent < key.offset) {
4155 const u64 len = key.offset - *last_extent;
4157 ret = btrfs_insert_file_extent(trans, log,
4166 * Need to let the callers know we dropped the path so they should re-search.
4169 if (!ret && need_find_last_extent)
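/* list_sort() comparator: order extent maps by increasing start offset */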
4174 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4176 struct extent_map *em1, *em2;
4178 em1 = list_entry(a, struct extent_map, list);
4179 em2 = list_entry(b, struct extent_map, list);
4181 if (em1->start < em2->start)
4183 else if (em1->start > em2->start)
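/*
 * look up the checksums covering the range of an extent map being
 * logged and copy them into the log tree. Holes, prealloc extents and
 * inodes flagged NODATASUM are skipped.
 */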
4188 static int log_extent_csums(struct btrfs_trans_handle *trans,
4189 struct btrfs_inode *inode,
4190 struct btrfs_root *log_root,
4191 const struct extent_map *em)
4195 LIST_HEAD(ordered_sums);
4198 if (inode->flags & BTRFS_INODE_NODATASUM ||
4199 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4200 em->block_start == EXTENT_MAP_HOLE)
4203 /* If we're compressed we have to save the entire range of csums. */
4204 if (em->compress_type) {
4206 csum_len = max(em->block_len, em->orig_block_len);
4208 csum_offset = em->mod_start - em->start;
4209 csum_len = em->mod_len;
4212 /* block start is already adjusted for the file extent offset. */
4213 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4214 em->block_start + csum_offset,
4215 em->block_start + csum_offset +
4216 csum_len - 1, &ordered_sums, 0);
4220 while (!list_empty(&ordered_sums)) {
4221 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4222 struct btrfs_ordered_sum,
4225 ret = btrfs_csum_file_blocks(trans, log_root, sums);
4226 list_del(&sums->list);
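/*
 * log a single file extent for the given extent map: copy its csums,
 * drop any overlapping extent items already in the log and insert a new
 * file extent item describing the extent.
 */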
4233 static int log_one_extent(struct btrfs_trans_handle *trans,
4234 struct btrfs_inode *inode, struct btrfs_root *root,
4235 const struct extent_map *em,
4236 struct btrfs_path *path,
4237 struct btrfs_log_ctx *ctx)
4239 struct btrfs_root *log = root->log_root;
4240 struct btrfs_file_extent_item *fi;
4241 struct extent_buffer *leaf;
4242 struct btrfs_map_token token;
4243 struct btrfs_key key;
4244 u64 extent_offset = em->start - em->orig_start;
4247 int extent_inserted = 0;
4249 ret = log_extent_csums(trans, inode, log, em);
4253 btrfs_init_map_token(&token);
4255 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4256 em->start + em->len, NULL, 0, 1,
4257 sizeof(*fi), &extent_inserted);
4261 if (!extent_inserted) {
4262 key.objectid = btrfs_ino(inode);
4263 key.type = BTRFS_EXTENT_DATA_KEY;
4264 key.offset = em->start;
4266 ret = btrfs_insert_empty_item(trans, log, path, &key,
4271 leaf = path->nodes[0];
4272 fi = btrfs_item_ptr(leaf, path->slots[0],
4273 struct btrfs_file_extent_item);
4275 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4277 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4278 btrfs_set_token_file_extent_type(leaf, fi,
4279 BTRFS_FILE_EXTENT_PREALLOC,
4282 btrfs_set_token_file_extent_type(leaf, fi,
4283 BTRFS_FILE_EXTENT_REG,
4286 block_len = max(em->block_len, em->orig_block_len);
4287 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4288 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4291 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4293 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4294 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4296 extent_offset, &token);
4297 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4300 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4301 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4305 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4306 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4307 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4308 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4310 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4311 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4312 btrfs_mark_buffer_dirty(leaf);
4314 btrfs_release_path(path);
4320 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4321 * lose them after doing a fast fsync and replaying the log. We scan the
4322 * subvolume's root instead of iterating the inode's extent map tree because
4323 * otherwise we can log incorrect extent items based on extent map conversion.
4324 * That can happen due to the fact that extent maps are merged when they
4325 * are not in the extent map tree's list of modified extents.
4327 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4328 struct btrfs_inode *inode,
4329 struct btrfs_path *path)
4331 struct btrfs_root *root = inode->root;
4332 struct btrfs_key key;
4333 const u64 i_size = i_size_read(&inode->vfs_inode);
4334 const u64 ino = btrfs_ino(inode);
4335 struct btrfs_path *dst_path = NULL;
4336 u64 last_extent = (u64)-1;
4341 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4345 key.type = BTRFS_EXTENT_DATA_KEY;
4346 key.offset = i_size;
4347 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4352 struct extent_buffer *leaf = path->nodes[0];
4353 int slot = path->slots[0];
4355 if (slot >= btrfs_header_nritems(leaf)) {
4357 ret = copy_items(trans, inode, dst_path, path,
4358 &last_extent, start_slot,
4364 ret = btrfs_next_leaf(root, path);
4374 btrfs_item_key_to_cpu(leaf, &key, slot);
4375 if (key.objectid > ino)
4377 if (WARN_ON_ONCE(key.objectid < ino) ||
4378 key.type < BTRFS_EXTENT_DATA_KEY ||
4379 key.offset < i_size) {
4383 if (last_extent == (u64)-1) {
4384 last_extent = key.offset;
4386 * Avoid logging extent items already logged in past fsync calls,
4387 * which would lead to duplicate keys in the log tree.
4390 ret = btrfs_truncate_inode_items(trans,
4394 BTRFS_EXTENT_DATA_KEY);
4395 } while (ret == -EAGAIN);
4404 dst_path = btrfs_alloc_path();
4412 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4413 start_slot, ins_nr, 1, 0);
4418 btrfs_release_path(path);
4419 btrfs_free_path(dst_path);
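/*
 * log every extent map modified in the current transaction that falls
 * in the given range of the inode (the fast fsync path), then log the
 * prealloc extents beyond i_size.
 */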
4423 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4424 struct btrfs_root *root,
4425 struct btrfs_inode *inode,
4426 struct btrfs_path *path,
4427 struct btrfs_log_ctx *ctx,
4431 struct extent_map *em, *n;
4432 struct list_head extents;
4433 struct extent_map_tree *tree = &inode->extent_tree;
4438 INIT_LIST_HEAD(&extents);
4440 write_lock(&tree->lock);
4441 test_gen = root->fs_info->last_trans_committed;
4443 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4445 * Skip extents outside our logging range. It's important to do
4446 * it for correctness because if we don't ignore them, we may
4447 * log them before their ordered extent completes, and therefore
4448 * we could log them without logging their respective checksums
4449 * (the checksum items are added to the csum tree at the very
4450 * end of btrfs_finish_ordered_io()). Also leave such extents
4451 * outside of our range in the list, since we may have another
4452 * ranged fsync in the near future that needs them. If an extent
4453 * outside our range corresponds to a hole, log it to avoid
4454 * leaving gaps between extents (fsck will complain when we are
4455 * not using the NO_HOLES feature).
4457 if ((em->start > end || em->start + em->len <= start) &&
4458 em->block_start != EXTENT_MAP_HOLE)
4461 list_del_init(&em->list);
4463 * Just an arbitrary number, this can be really CPU intensive
4464 * once we start getting a lot of extents, and really once we
4465 * have a bunch of extents we just want to commit since it will be faster.
4468 if (++num > 32768) {
4469 list_del_init(&tree->modified_extents);
4474 if (em->generation <= test_gen)
4477 /* We log prealloc extents beyond eof later. */
4478 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4479 em->start >= i_size_read(&inode->vfs_inode))
4482 /* Need a ref to keep it from getting evicted from cache */
4483 refcount_inc(&em->refs);
4484 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4485 list_add_tail(&em->list, &extents);
4489 list_sort(NULL, &extents, extent_cmp);
4491 while (!list_empty(&extents)) {
4492 em = list_entry(extents.next, struct extent_map, list);
4494 list_del_init(&em->list);
4497 * If we had an error we just need to delete everybody from our private list.
4501 clear_em_logging(tree, em);
4502 free_extent_map(em);
4506 write_unlock(&tree->lock);
4508 ret = log_one_extent(trans, inode, root, em, path, ctx);
4509 write_lock(&tree->lock);
4510 clear_em_logging(tree, em);
4511 free_extent_map(em);
4513 WARN_ON(!list_empty(&extents));
4514 write_unlock(&tree->lock);
4516 btrfs_release_path(path);
4518 ret = btrfs_log_prealloc_extents(trans, inode, path);
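/*
 * read back the inode size currently recorded in the log tree, or 0 if
 * the inode item has not been logged yet.
 */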
4523 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4524 struct btrfs_path *path, u64 *size_ret)
4526 struct btrfs_key key;
4529 key.objectid = btrfs_ino(inode);
4530 key.type = BTRFS_INODE_ITEM_KEY;
4533 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4536 } else if (ret > 0) {
4539 struct btrfs_inode_item *item;
4541 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4542 struct btrfs_inode_item);
4543 *size_ret = btrfs_inode_size(path->nodes[0], item);
4546 btrfs_release_path(path);
4551 * At the moment we always log all xattrs. This is to figure out at log replay
4552 * time which xattrs must have their deletion replayed. If an xattr is missing
4553 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4554 * because if an xattr is deleted, the inode is fsynced and a power failure
4555 * happens, causing the log to be replayed the next time the fs is mounted,
4556 * we want the xattr to not exist anymore (same behaviour as other filesystems
4557 * with a journal, ext3/4, xfs, f2fs, etc).
4559 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4560 struct btrfs_root *root,
4561 struct btrfs_inode *inode,
4562 struct btrfs_path *path,
4563 struct btrfs_path *dst_path)
4566 struct btrfs_key key;
4567 const u64 ino = btrfs_ino(inode);
4572 key.type = BTRFS_XATTR_ITEM_KEY;
4575 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4580 int slot = path->slots[0];
4581 struct extent_buffer *leaf = path->nodes[0];
4582 int nritems = btrfs_header_nritems(leaf);
4584 if (slot >= nritems) {
4586 u64 last_extent = 0;
4588 ret = copy_items(trans, inode, dst_path, path,
4589 &last_extent, start_slot,
4591 /* can't be 1, extent items aren't processed */
4597 ret = btrfs_next_leaf(root, path);
4605 btrfs_item_key_to_cpu(leaf, &key, slot);
4606 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4616 u64 last_extent = 0;
4618 ret = copy_items(trans, inode, dst_path, path,
4619 &last_extent, start_slot,
4621 /* can't be 1, extent items aren't processed */
4631 * If the no holes feature is enabled we need to make sure any hole between the
4632 * last extent and the i_size of our inode is explicitly marked in the log. This
4633 * is to make sure that doing something like:
4635 * 1) create file with 128Kb of data
4636 * 2) truncate file to 64Kb
4637 * 3) truncate file to 256Kb
4639 * 5) <crash/power failure>
4640 * 6) mount fs and trigger log replay
4642 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4643 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4644 * file correspond to a hole. The presence of explicit holes in a log tree is
4645 * what guarantees that log replay will remove/adjust file extent items in the
4648 * Here we do not need to care about holes between extents, that is already done
4649 * by copy_items(). We also only need to do this in the full sync path, where we
4650 * look up extents from the fs/subvol tree only. In the fast path case, we
4651 * look up the list of modified extent maps and if any represents a hole, we
4652 * insert a corresponding extent representing a hole in the log tree.
4654 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4655 struct btrfs_root *root,
4656 struct btrfs_inode *inode,
4657 struct btrfs_path *path)
4659 struct btrfs_fs_info *fs_info = root->fs_info;
4661 struct btrfs_key key;
4664 struct extent_buffer *leaf;
4665 struct btrfs_root *log = root->log_root;
4666 const u64 ino = btrfs_ino(inode);
4667 const u64 i_size = i_size_read(&inode->vfs_inode);
4669 if (!btrfs_fs_incompat(fs_info, NO_HOLES))
4673 key.type = BTRFS_EXTENT_DATA_KEY;
4674 key.offset = (u64)-1;
4676 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4681 ASSERT(path->slots[0] > 0);
4683 leaf = path->nodes[0];
4684 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4686 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4687 /* inode does not have any extents */
4691 struct btrfs_file_extent_item *extent;
4695 * If there's an extent beyond i_size, an explicit hole was
4696 * already inserted by copy_items().
4698 if (key.offset >= i_size)
4701 extent = btrfs_item_ptr(leaf, path->slots[0],
4702 struct btrfs_file_extent_item);
4704 if (btrfs_file_extent_type(leaf, extent) ==
4705 BTRFS_FILE_EXTENT_INLINE) {
4706 len = btrfs_file_extent_ram_bytes(leaf, extent);
4707 ASSERT(len == i_size ||
4708 (len == fs_info->sectorsize &&
4709 btrfs_file_extent_compression(leaf, extent) !=
4710 BTRFS_COMPRESS_NONE) ||
4711 (len < i_size && i_size < fs_info->sectorsize));
4715 len = btrfs_file_extent_num_bytes(leaf, extent);
4716 /* Last extent goes beyond i_size, no need to log a hole. */
4717 if (key.offset + len > i_size)
4719 hole_start = key.offset + len;
4720 hole_size = i_size - hole_start;
4722 btrfs_release_path(path);
4724 /* Last extent ends at i_size. */
4728 hole_size = ALIGN(hole_size, fs_info->sectorsize);
4729 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4730 hole_size, 0, hole_size, 0, 0, 0);
4735 * When we are logging a new inode X, check if it doesn't have a reference that
4736 * matches the reference from some other inode Y created in a past transaction
4737 * and that was renamed in the current transaction. If we don't do this, then at
4738 * log replay time we can lose inode Y (and all its files if it's a directory):
4741 * echo "hello world" > /mnt/x/foobar
4744 * mkdir /mnt/x # or touch /mnt/x
4745 * xfs_io -c fsync /mnt/x
4747 * mount fs, trigger log replay
4749 * After the log replay procedure, we would lose the first directory and all its
4750 * files (file foobar).
4751 * For the case where inode Y is not a directory we simply end up losing it:
4753 * echo "123" > /mnt/foo
4755 * mv /mnt/foo /mnt/bar
4756 * echo "abc" > /mnt/foo
4757 * xfs_io -c fsync /mnt/foo
4760 * We also need this for cases where a snapshot entry is replaced by some other
4761 * entry (file or directory), otherwise we end up with an unreplayable log due to
4762 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4763 * if it were a regular entry:
4766 * btrfs subvolume snapshot /mnt /mnt/x/snap
4767 * btrfs subvolume delete /mnt/x/snap
4770 * fsync /mnt/x or fsync some new file inside it
4773 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4774 * the same transaction.
4776 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4778 const struct btrfs_key *key,
4779 struct btrfs_inode *inode,
4783 struct btrfs_path *search_path;
4786 u32 item_size = btrfs_item_size_nr(eb, slot);
4788 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4790 search_path = btrfs_alloc_path();
4793 search_path->search_commit_root = 1;
4794 search_path->skip_locking = 1;
4796 while (cur_offset < item_size) {
4800 unsigned long name_ptr;
4801 struct btrfs_dir_item *di;
4803 if (key->type == BTRFS_INODE_REF_KEY) {
4804 struct btrfs_inode_ref *iref;
4806 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4807 parent = key->offset;
4808 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4809 name_ptr = (unsigned long)(iref + 1);
4810 this_len = sizeof(*iref) + this_name_len;
4812 struct btrfs_inode_extref *extref;
4814 extref = (struct btrfs_inode_extref *)(ptr +
4816 parent = btrfs_inode_extref_parent(eb, extref);
4817 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4818 name_ptr = (unsigned long)&extref->name;
4819 this_len = sizeof(*extref) + this_name_len;
4822 if (this_name_len > name_len) {
4825 new_name = krealloc(name, this_name_len, GFP_NOFS);
4830 name_len = this_name_len;
4834 read_extent_buffer(eb, name, name_ptr, this_name_len);
4835 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4836 parent, name, this_name_len, 0);
4837 if (di && !IS_ERR(di)) {
4838 struct btrfs_key di_key;
4840 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4842 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4843 if (di_key.objectid != key->objectid) {
4845 *other_ino = di_key.objectid;
4853 } else if (IS_ERR(di)) {
4857 btrfs_release_path(search_path);
4859 cur_offset += this_len;
4863 btrfs_free_path(search_path);
4868 struct btrfs_ino_list {
4870 struct list_head list;
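/*
 * log, in LOG_OTHER_INODE mode, every inode found to have a name that
 * conflicts with a name of the inode currently being logged, walking
 * the chain of conflicts through the inode_list instead of recursing.
 */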
4873 static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
4874 struct btrfs_root *root,
4875 struct btrfs_path *path,
4876 struct btrfs_log_ctx *ctx,
4879 struct btrfs_ino_list *ino_elem;
4880 LIST_HEAD(inode_list);
4883 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4886 ino_elem->ino = ino;
4887 list_add_tail(&ino_elem->list, &inode_list);
4889 while (!list_empty(&inode_list)) {
4890 struct btrfs_fs_info *fs_info = root->fs_info;
4891 struct btrfs_key key;
4892 struct inode *inode;
4894 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list,
4896 ino = ino_elem->ino;
4897 list_del(&ino_elem->list);
4902 btrfs_release_path(path);
4905 key.type = BTRFS_INODE_ITEM_KEY;
4907 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4909 * If the other inode that had a conflicting dir entry was
4910 * deleted in the current transaction, we don't need to do more
4911 * work nor fall back to a transaction commit.
4913 if (IS_ERR(inode)) {
4914 ret = PTR_ERR(inode);
4920 * We are safe logging the other inode without acquiring its
4921 * lock as long as we log with the LOG_INODE_EXISTS mode. We
4922 * are safe against concurrent renames of the other inode as
4923 * well because during a rename we pin the log and update the
4924 * log with the new name before we unpin it.
4926 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
4927 LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
4934 key.type = BTRFS_INODE_REF_KEY;
4936 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4943 struct extent_buffer *leaf = path->nodes[0];
4944 int slot = path->slots[0];
4947 if (slot >= btrfs_header_nritems(leaf)) {
4948 ret = btrfs_next_leaf(root, path);
4951 } else if (ret > 0) {
4958 btrfs_item_key_to_cpu(leaf, &key, slot);
4959 if (key.objectid != ino ||
4960 (key.type != BTRFS_INODE_REF_KEY &&
4961 key.type != BTRFS_INODE_EXTREF_KEY)) {
4966 ret = btrfs_check_ref_name_override(leaf, slot, &key,
4967 BTRFS_I(inode), &other_ino);
4971 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS);
4976 ino_elem->ino = other_ino;
4977 list_add_tail(&ino_elem->list, &inode_list);
4988 /* log a single inode in the tree log.
4989 * At least one parent directory for this inode must exist in the tree
4990 * or be logged already.
4992 * Any items from this inode changed by the current transaction are copied
4993 * to the log tree. An extra reference is taken on any extents in this
4994 * file, allowing us to avoid a whole pile of corner cases around logging
4995 * blocks that have been removed from the tree.
4997 * See LOG_INODE_ALL and related defines for a description of what inode_only is used for.
5000 * This handles both files and directories.
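*
* In rough order, the function below commits or updates the inode's
* delayed items, drops stale copies of the inode's items from the log
* (how much depends on inode_only and the full sync flag), copies the
* changed items from the subvolume tree into the log in batches, then
* logs xattrs, a possible trailing hole, the inode item itself, the
* changed extents for fast fsyncs and, for directories logged with
* LOG_INODE_ALL, the directory entries.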
5002 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5003 struct btrfs_root *root, struct btrfs_inode *inode,
5007 struct btrfs_log_ctx *ctx)
5009 struct btrfs_fs_info *fs_info = root->fs_info;
5010 struct btrfs_path *path;
5011 struct btrfs_path *dst_path;
5012 struct btrfs_key min_key;
5013 struct btrfs_key max_key;
5014 struct btrfs_root *log = root->log_root;
5015 u64 last_extent = 0;
5019 int ins_start_slot = 0;
5021 bool fast_search = false;
5022 u64 ino = btrfs_ino(inode);
5023 struct extent_map_tree *em_tree = &inode->extent_tree;
5024 u64 logged_isize = 0;
5025 bool need_log_inode_item = true;
5026 bool xattrs_logged = false;
5027 bool recursive_logging = (inode_only == LOG_OTHER_INODE);
5029 path = btrfs_alloc_path();
5032 dst_path = btrfs_alloc_path();
5034 btrfs_free_path(path);
5038 min_key.objectid = ino;
5039 min_key.type = BTRFS_INODE_ITEM_KEY;
5042 max_key.objectid = ino;
5045 /* today the code can only do partial logging of directories */
5046 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5047 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5048 &inode->runtime_flags) &&
5049 inode_only >= LOG_INODE_EXISTS))
5050 max_key.type = BTRFS_XATTR_ITEM_KEY;
5052 max_key.type = (u8)-1;
5053 max_key.offset = (u64)-1;
5056 * Only run delayed items if we are a dir or a new file.
5057 * Otherwise commit the delayed inode only, which is needed in
5058 * order for the log replay code to mark inodes for link count
5059 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
5061 if (S_ISDIR(inode->vfs_inode.i_mode) ||
5062 inode->generation > fs_info->last_trans_committed)
5063 ret = btrfs_commit_inode_delayed_items(trans, inode);
5065 ret = btrfs_commit_inode_delayed_inode(inode);
5068 btrfs_free_path(path);
5069 btrfs_free_path(dst_path);
5073 if (inode_only == LOG_OTHER_INODE) {
5074 inode_only = LOG_INODE_EXISTS;
5075 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5077 mutex_lock(&inode->log_mutex);
5081 * a brute force approach to making sure we get the most up-to-date
5082 * copies of everything.
5084 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5085 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5087 if (inode_only == LOG_INODE_EXISTS)
5088 max_key_type = BTRFS_XATTR_ITEM_KEY;
5089 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5091 if (inode_only == LOG_INODE_EXISTS) {
5093 * Make sure the new inode item we write to the log has
5094 * the same isize as the current one (if it exists).
5095 * This is necessary to prevent data loss after log
5096 * replay, and also to prevent doing a wrong expanding
5097 * truncate - for e.g. create file, write 4K into offset
5098 * 0, fsync, write 4K into offset 4096, add hard link,
5099 * fsync some other file (to sync log), power fail - if
5100 * we use the inode's current i_size, after log replay
5101 * we get a 8Kb file, with the last 4Kb extent as a hole
5102 * (zeroes), as if an expanding truncate happened,
5103 * instead of getting a file of 4Kb only.
5105 err = logged_inode_size(log, inode, path, &logged_isize);
5109 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5110 &inode->runtime_flags)) {
5111 if (inode_only == LOG_INODE_EXISTS) {
5112 max_key.type = BTRFS_XATTR_ITEM_KEY;
5113 ret = drop_objectid_items(trans, log, path, ino,
5116 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5117 &inode->runtime_flags);
5118 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5119 &inode->runtime_flags);
5121 ret = btrfs_truncate_inode_items(trans,
5122 log, &inode->vfs_inode, 0, 0);
5127 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5128 &inode->runtime_flags) ||
5129 inode_only == LOG_INODE_EXISTS) {
5130 if (inode_only == LOG_INODE_ALL)
5132 max_key.type = BTRFS_XATTR_ITEM_KEY;
5133 ret = drop_objectid_items(trans, log, path, ino,
5136 if (inode_only == LOG_INODE_ALL)
5149 ret = btrfs_search_forward(root, &min_key,
5150 path, trans->transid);
5158 /* note, ins_nr might be > 0 here, cleanup outside the loop */
5159 if (min_key.objectid != ino)
5161 if (min_key.type > max_key.type)
5164 if (min_key.type == BTRFS_INODE_ITEM_KEY)
5165 need_log_inode_item = false;
5167 if ((min_key.type == BTRFS_INODE_REF_KEY ||
5168 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
5169 inode->generation == trans->transid &&
5170 !recursive_logging) {
5173 ret = btrfs_check_ref_name_override(path->nodes[0],
5174 path->slots[0], &min_key, inode, &other_ino);
5179 } else if (ret > 0 && ctx &&
5180 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5185 ins_start_slot = path->slots[0];
5187 ret = copy_items(trans, inode, dst_path, path,
5188 &last_extent, ins_start_slot,
5197 err = log_conflicting_inodes(trans, root, path,
5201 btrfs_release_path(path);
5206 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5207 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
5210 ret = copy_items(trans, inode, dst_path, path,
5211 &last_extent, ins_start_slot,
5212 ins_nr, inode_only, logged_isize);
5219 btrfs_release_path(path);
5225 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5228 } else if (!ins_nr) {
5229 ins_start_slot = path->slots[0];
5234 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5235 ins_start_slot, ins_nr, inode_only,
5243 btrfs_release_path(path);
5247 ins_start_slot = path->slots[0];
5250 nritems = btrfs_header_nritems(path->nodes[0]);
5252 if (path->slots[0] < nritems) {
5253 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
5258 ret = copy_items(trans, inode, dst_path, path,
5259 &last_extent, ins_start_slot,
5260 ins_nr, inode_only, logged_isize);
5268 btrfs_release_path(path);
5270 if (min_key.offset < (u64)-1) {
5272 } else if (min_key.type < max_key.type) {
5280 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5281 ins_start_slot, ins_nr, inode_only,
5291 btrfs_release_path(path);
5292 btrfs_release_path(dst_path);
5293 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5296 xattrs_logged = true;
5297 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5298 btrfs_release_path(path);
5299 btrfs_release_path(dst_path);
5300 err = btrfs_log_trailing_hole(trans, root, inode, path);
5305 btrfs_release_path(path);
5306 btrfs_release_path(dst_path);
5307 if (need_log_inode_item) {
5308 err = log_inode_item(trans, log, dst_path, inode);
5309 if (!err && !xattrs_logged) {
5310 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5312 btrfs_release_path(path);
5318 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5324 } else if (inode_only == LOG_INODE_ALL) {
5325 struct extent_map *em, *n;
5327 write_lock(&em_tree->lock);
5329 * We can't just remove every em if we're called for a ranged
5330 * fsync - that is, one that doesn't cover the whole possible
5331 * file range (0 to LLONG_MAX). This is because we can have
5332 * em's that fall outside the range we're logging and therefore
5333 * their ordered operations haven't completed yet
5334 * (btrfs_finish_ordered_io() not invoked yet). This means we
5335 * didn't get their respective file extent item in the fs/subvol
5336 * tree yet, and need to let the next fast fsync (one which
5337 * consults the list of modified extent maps) find the em so
5338 * that it logs a matching file extent item and waits for the
5339 * respective ordered operation to complete (if it's still running).
5342 * Removing every em outside the range we're logging would make
5343 * the next fast fsync not log their matching file extent items,
5344 * therefore making us lose data after a log replay.
5346 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
5348 const u64 mod_end = em->mod_start + em->mod_len - 1;
5350 if (em->mod_start >= start && mod_end <= end)
5351 list_del_init(&em->list);
5353 write_unlock(&em_tree->lock);
5356 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5357 ret = log_directory_changes(trans, root, inode, path, dst_path,
5365 spin_lock(&inode->lock);
5366 inode->logged_trans = trans->transid;
5367 inode->last_log_commit = inode->last_sub_trans;
5368 spin_unlock(&inode->lock);
5370 mutex_unlock(&inode->log_mutex);
5372 btrfs_free_path(path);
5373 btrfs_free_path(dst_path);
5378 * Check if we must fallback to a transaction commit when logging an inode.
5379 * This must be called after logging the inode and is used only in the context
5380 * where fsyncing an inode requires logging some other inode - in which
5381 * case we can't lock the i_mutex of each other inode we need to log, as that
5382 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5383 * log inodes up or down in the hierarchy) or rename operations for example. So
5384 * we take the log_mutex of the inode after we have logged it and then check for
5385 * its last_unlink_trans value - this is safe because any task setting
5386 * last_unlink_trans must take the log_mutex and it must do this before it does
5387 * the actual unlink operation, so if we do this check before a concurrent task
5388 * sets last_unlink_trans it means we've logged a consistent version/state of
5389 * all the inode items, otherwise we are not sure and must do a transaction
5390 * commit (the concurrent task might have only updated last_unlink_trans before
5391 * we logged the inode or it might have also done the unlink).
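*
* For example: task A logs inode X while task B unlinks one of X's names.
* B must take X's log_mutex and set last_unlink_trans before it touches
* the subvolume tree, so when A re-takes that mutex here it either sees
* the updated last_unlink_trans (and forces a full commit) or it logged X
* before B changed anything, which is still a consistent state.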
5393 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5394 struct btrfs_inode *inode)
5396 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5399 mutex_lock(&inode->log_mutex);
5400 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5402 * Make sure any commits to the log are forced to be full
5405 btrfs_set_log_full_commit(fs_info, trans);
5408 mutex_unlock(&inode->log_mutex);
5414 * follow the dentry parent pointers up the chain and see if any
5415 * of those directories require a full commit before they can
5416 * be logged. Returns zero if nothing special needs to be done or 1 if
5417 * a full commit is required.
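*
* The walk below simply follows dget_parent() from the fsynced inode (or
* from its parent, for regular files) towards the root dentry, using
* btrfs_must_commit_transaction() at each level to decide whether a
* recent unlink or rename in that directory forces a full commit.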
5419 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5420 struct btrfs_inode *inode,
5421 struct dentry *parent,
5422 struct super_block *sb,
5426 struct dentry *old_parent = NULL;
5427 struct btrfs_inode *orig_inode = inode;
5430 * for regular files, if its inode is already on disk, we don't
5431 * have to worry about the parents at all. This is because
5432 * we can use the last_unlink_trans field to record renames
5433 * and other fun in this file.
5435 if (S_ISREG(inode->vfs_inode.i_mode) &&
5436 inode->generation <= last_committed &&
5437 inode->last_unlink_trans <= last_committed)
5440 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5441 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5443 inode = BTRFS_I(d_inode(parent));
5448 * If we are logging a directory then we start with our inode,
5449 * not our parent's inode, so we need to skip setting the
5450 * logged_trans so that further down in the log code we don't
5451 * think this inode has already been logged.
5453 if (inode != orig_inode)
5454 inode->logged_trans = trans->transid;
5457 if (btrfs_must_commit_transaction(trans, inode)) {
5462 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5465 if (IS_ROOT(parent)) {
5466 inode = BTRFS_I(d_inode(parent));
5467 if (btrfs_must_commit_transaction(trans, inode))
5472 parent = dget_parent(parent);
5474 old_parent = parent;
5475 inode = BTRFS_I(d_inode(parent));
5483 struct btrfs_dir_list {
5485 struct list_head list;
5489 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5490 * details about why it is needed.
5491 * This is a recursive operation - if an existing dentry corresponds to a
5492 * directory, that directory's new entries are logged too (same behaviour as
5493 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5494 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5495 * complains about the following circular lock dependency / possible deadlock:
5499 * lock(&type->i_mutex_dir_key#3/2);
5500 * lock(sb_internal#2);
5501 * lock(&type->i_mutex_dir_key#3/2);
5502 * lock(&sb->s_type->i_mutex_key#14);
5504 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5505 * sb_start_intwrite() in btrfs_start_transaction().
5506 * Not locking i_mutex of the inodes is still safe because:
5508 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5509 * that while logging the inode new references (names) are added or removed
5510 * from the inode, leaving the logged inode item with a link count that does
5511 * not match the number of logged inode reference items. This is fine because
5512 * at log replay time we compute the real number of links and correct the
5513 * link count in the inode item (see replay_one_buffer() and
5514 * link_to_fixup_dir());
5516 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5517 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5518 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5519 * has a size that doesn't match the sum of the lengths of all the logged
5520 * names. This does not result in a problem because if a dir_item key is
5521 * logged but its matching dir_index key is not logged, at log replay time we
5522 * don't use it to replay the respective name (see replay_one_name()). On the
5523 * other hand if only the dir_index key ends up being logged, the respective
5524 * name is added to the fs/subvol tree with both the dir_item and dir_index
5525 * keys created (see replay_one_name()).
5526 * The directory's inode item with a wrong i_size is not a problem as well,
5527 * since we don't use it at log replay time to set the i_size in the inode
5528 * item of the fs/subvol tree (see overwrite_item()).
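*
* The implementation below keeps a list of directory inode numbers to
* process, starting with @start_inode. For each one it searches the log
* tree for BTRFS_DIR_ITEM_KEY items logged in the current transaction,
* logs the inode each new dentry points to (LOG_INODE_ALL for directories
* and symlinks, LOG_INODE_EXISTS otherwise) and, whenever logging such an
* inode reports new dentries of its own, appends it to the list so it
* gets the same treatment.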
5530 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5531 struct btrfs_root *root,
5532 struct btrfs_inode *start_inode,
5533 struct btrfs_log_ctx *ctx)
5535 struct btrfs_fs_info *fs_info = root->fs_info;
5536 struct btrfs_root *log = root->log_root;
5537 struct btrfs_path *path;
5538 LIST_HEAD(dir_list);
5539 struct btrfs_dir_list *dir_elem;
5542 path = btrfs_alloc_path();
5546 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5548 btrfs_free_path(path);
5551 dir_elem->ino = btrfs_ino(start_inode);
5552 list_add_tail(&dir_elem->list, &dir_list);
5554 while (!list_empty(&dir_list)) {
5555 struct extent_buffer *leaf;
5556 struct btrfs_key min_key;
5560 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5563 goto next_dir_inode;
5565 min_key.objectid = dir_elem->ino;
5566 min_key.type = BTRFS_DIR_ITEM_KEY;
5569 btrfs_release_path(path);
5570 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5572 goto next_dir_inode;
5573 } else if (ret > 0) {
5575 goto next_dir_inode;
5579 leaf = path->nodes[0];
5580 nritems = btrfs_header_nritems(leaf);
5581 for (i = path->slots[0]; i < nritems; i++) {
5582 struct btrfs_dir_item *di;
5583 struct btrfs_key di_key;
5584 struct inode *di_inode;
5585 struct btrfs_dir_list *new_dir_elem;
5586 int log_mode = LOG_INODE_EXISTS;
5589 btrfs_item_key_to_cpu(leaf, &min_key, i);
5590 if (min_key.objectid != dir_elem->ino ||
5591 min_key.type != BTRFS_DIR_ITEM_KEY)
5592 goto next_dir_inode;
5594 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5595 type = btrfs_dir_type(leaf, di);
5596 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5597 type != BTRFS_FT_DIR)
5599 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5600 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5603 btrfs_release_path(path);
5604 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
5605 if (IS_ERR(di_inode)) {
5606 ret = PTR_ERR(di_inode);
5607 goto next_dir_inode;
5610 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5615 ctx->log_new_dentries = false;
5616 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5617 log_mode = LOG_INODE_ALL;
5618 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5619 log_mode, 0, LLONG_MAX, ctx);
5621 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5625 goto next_dir_inode;
5626 if (ctx->log_new_dentries) {
5627 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5629 if (!new_dir_elem) {
5631 goto next_dir_inode;
5633 new_dir_elem->ino = di_key.objectid;
5634 list_add_tail(&new_dir_elem->list, &dir_list);
5639 ret = btrfs_next_leaf(log, path);
5641 goto next_dir_inode;
5642 } else if (ret > 0) {
5644 goto next_dir_inode;
5648 if (min_key.offset < (u64)-1) {
5653 list_del(&dir_elem->list);
5657 btrfs_free_path(path);
5661 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5662 struct btrfs_inode *inode,
5663 struct btrfs_log_ctx *ctx)
5665 struct btrfs_fs_info *fs_info = trans->fs_info;
5667 struct btrfs_path *path;
5668 struct btrfs_key key;
5669 struct btrfs_root *root = inode->root;
5670 const u64 ino = btrfs_ino(inode);
5672 path = btrfs_alloc_path();
5675 path->skip_locking = 1;
5676 path->search_commit_root = 1;
5679 key.type = BTRFS_INODE_REF_KEY;
5681 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5686 struct extent_buffer *leaf = path->nodes[0];
5687 int slot = path->slots[0];
5692 if (slot >= btrfs_header_nritems(leaf)) {
5693 ret = btrfs_next_leaf(root, path);
5701 btrfs_item_key_to_cpu(leaf, &key, slot);
5702 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5703 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5706 item_size = btrfs_item_size_nr(leaf, slot);
5707 ptr = btrfs_item_ptr_offset(leaf, slot);
5708 while (cur_offset < item_size) {
5709 struct btrfs_key inode_key;
5710 struct inode *dir_inode;
5712 inode_key.type = BTRFS_INODE_ITEM_KEY;
5713 inode_key.offset = 0;
5715 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5716 struct btrfs_inode_extref *extref;
5718 extref = (struct btrfs_inode_extref *)(ptr + cur_offset);
5720 inode_key.objectid = btrfs_inode_extref_parent(leaf, extref);
5722 cur_offset += sizeof(*extref);
5723 cur_offset += btrfs_inode_extref_name_len(leaf, extref);
5726 inode_key.objectid = key.offset;
5727 cur_offset = item_size;
5730 dir_inode = btrfs_iget(fs_info->sb, &inode_key, root, NULL);
5733 * If the parent inode was deleted, return an error to
5734 * fall back to a transaction commit. This is to prevent
5735 * an inode that was moved from a parent A to a parent B,
5736 * had its former parent A deleted and was then fsync'ed,
5737 * from ending up linked in both parents after a log
5738 * replay (with the old parent still existing).
5745 * mv /mnt/B/bar /mnt/A/bar
5746 * mv -T /mnt/A /mnt/B
5750 * If we ignore the old parent B which got deleted,
5751 * after a log replay we would have file bar linked
5752 * at both parents and the old parent B would still
5755 if (IS_ERR(dir_inode)) {
5756 ret = PTR_ERR(dir_inode);
5761 ctx->log_new_dentries = false;
5762 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5763 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5765 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5767 if (!ret && ctx && ctx->log_new_dentries)
5768 ret = log_new_dir_dentries(trans, root,
5769 BTRFS_I(dir_inode), ctx);
5778 btrfs_free_path(path);
5783 * helper function around btrfs_log_inode to make sure newly created
5784 * parent directories also end up in the log. Only a minimal amount of
5785 * logging (the inode item and back references) is done for any parent
5786 * directories that are older than the last committed transaction.
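*
* In outline: return early and ask for a transaction commit when the log
* can't be used (the NOTREELOG mount option, a previously requested full
* transaction commit, or a parent directory that requires one), otherwise
* log the inode itself, log all of its parent directories if it was
* recently unlinked or renamed, walk up the dentry chain logging
* LOG_INODE_EXISTS for any ancestor created in the current transaction,
* and log the new dentries of directories when requested.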
5788 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5789 struct btrfs_inode *inode,
5790 struct dentry *parent,
5794 struct btrfs_log_ctx *ctx)
5796 struct btrfs_root *root = inode->root;
5797 struct btrfs_fs_info *fs_info = root->fs_info;
5798 struct super_block *sb;
5799 struct dentry *old_parent = NULL;
5801 u64 last_committed = fs_info->last_trans_committed;
5802 bool log_dentries = false;
5803 struct btrfs_inode *orig_inode = inode;
5805 sb = inode->vfs_inode.i_sb;
5807 if (btrfs_test_opt(fs_info, NOTREELOG)) {
5813 * If the previous transaction commit didn't complete, we have
5814 * to do a full commit ourselves.
5816 if (fs_info->last_trans_log_full_commit >
5817 fs_info->last_trans_committed) {
5822 if (btrfs_root_refs(&root->root_item) == 0) {
5827 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
5833 * Skip already logged inodes or inodes corresponding to tmpfiles
5834 * (since logging them is pointless, a link count of 0 means they
5835 * will never be accessible).
5837 if (btrfs_inode_in_log(inode, trans->transid) ||
5838 inode->vfs_inode.i_nlink == 0) {
5839 ret = BTRFS_NO_LOG_SYNC;
5843 ret = start_log_trans(trans, root, ctx);
5847 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5852 * for regular files, if its inode is already on disk, we don't
5853 * have to worry about the parents at all. This is because
5854 * we can use the last_unlink_trans field to record renames
5855 * and other fun in this file.
5857 if (S_ISREG(inode->vfs_inode.i_mode) &&
5858 inode->generation <= last_committed &&
5859 inode->last_unlink_trans <= last_committed) {
5864 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
5865 log_dentries = true;
5868 * On unlink we must make sure all our current and old parent directory
5869 * inodes are fully logged. This is to prevent leaving dangling
5870 * directory index entries in directories that were our parents but are
5871 * not anymore. Not doing this results in old parent directory being
5872 * impossible to delete after log replay (rmdir will always fail with
5873 * error -ENOTEMPTY).
5879 * ln testdir/foo testdir/bar
5881 * unlink testdir/bar
5882 * xfs_io -c fsync testdir/foo
5884 * mount fs, triggers log replay
5886 * If we don't log the parent directory (testdir), after log replay the
5887 * directory still has an entry pointing to the file inode using the bar
5888 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5889 * the file inode has a link count of 1.
5895 * ln foo testdir/foo2
5896 * ln foo testdir/foo3
5898 * unlink testdir/foo3
5899 * xfs_io -c fsync foo
5901 * mount fs, triggers log replay
5903 * Similar as the first example, after log replay the parent directory
5904 * testdir still has an entry pointing to the inode file with name foo3
5905 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5906 * and has a link count of 2.
5908 if (inode->last_unlink_trans > last_committed) {
5909 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
5915 * If a new hard link was added to the inode in the current transaction
5916 * and its link count is now greater than 1, we need to fallback to a
5917 * transaction commit, otherwise we can end up not logging all its new
5918 * parents for all the hard links. From the dentry used to fsync alone
5919 * we cannot visit the ancestor inodes of all the other hard links to
5920 * figure out if any of them is new, so we fall back to a transaction
5921 * commit (instead of adding a lot of complexity scanning a btree,
5922 * since this scenario is not a common use case).
5924 if (inode->vfs_inode.i_nlink > 1 &&
5925 inode->last_link_trans > last_committed) {
5931 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5934 inode = BTRFS_I(d_inode(parent));
5935 if (root != inode->root)
5938 if (inode->generation > last_committed) {
5939 ret = btrfs_log_inode(trans, root, inode,
5940 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5944 if (IS_ROOT(parent))
5947 parent = dget_parent(parent);
5949 old_parent = parent;
5952 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5958 btrfs_set_log_full_commit(fs_info, trans);
5963 btrfs_remove_log_ctx(root, ctx);
5964 btrfs_end_log_trans(root);
5970 * it is not safe to log a dentry if the chunk root has added new
5971 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5972 * If this returns 1, you must commit the transaction to safely get your data on disk.
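*
* This is a thin wrapper: it grabs a reference on the parent dentry and
* logs the inode with LOG_INODE_ALL through btrfs_log_inode_parent().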
5975 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
5976 struct dentry *dentry,
5979 struct btrfs_log_ctx *ctx)
5981 struct dentry *parent = dget_parent(dentry);
5984 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
5985 start, end, LOG_INODE_ALL, ctx);
5992 * should be called during mount to recover and replay any log trees
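*
* Roughly, replay proceeds in passes over the log trees: first pin every
* log tree block, then replay only the inodes, then the directory index
* items, then everything else (fixing up inode link counts as needed),
* and finish with a transaction commit that also unpins the blocks.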
5995 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5998 struct btrfs_path *path;
5999 struct btrfs_trans_handle *trans;
6000 struct btrfs_key key;
6001 struct btrfs_key found_key;
6002 struct btrfs_key tmp_key;
6003 struct btrfs_root *log;
6004 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
6005 struct walk_control wc = {
6006 .process_func = process_one_buffer,
6010 path = btrfs_alloc_path();
6014 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6016 trans = btrfs_start_transaction(fs_info->tree_root, 0);
6017 if (IS_ERR(trans)) {
6018 ret = PTR_ERR(trans);
6025 ret = walk_log_tree(trans, log_root_tree, &wc);
6027 btrfs_handle_fs_error(fs_info, ret,
6028 "Failed to pin buffers while recovering log root tree.");
6033 key.objectid = BTRFS_TREE_LOG_OBJECTID;
6034 key.offset = (u64)-1;
6035 key.type = BTRFS_ROOT_ITEM_KEY;
6038 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
6041 btrfs_handle_fs_error(fs_info, ret,
6042 "Couldn't find tree log root.");
6046 if (path->slots[0] == 0)
6050 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
6052 btrfs_release_path(path);
6053 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
6056 log = btrfs_read_fs_root(log_root_tree, &found_key);
6059 btrfs_handle_fs_error(fs_info, ret,
6060 "Couldn't read tree log root.");
6064 tmp_key.objectid = found_key.offset;
6065 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
6066 tmp_key.offset = (u64)-1;
6068 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
6069 if (IS_ERR(wc.replay_dest)) {
6070 ret = PTR_ERR(wc.replay_dest);
6071 free_extent_buffer(log->node);
6072 free_extent_buffer(log->commit_root);
6074 btrfs_handle_fs_error(fs_info, ret,
6075 "Couldn't read target root for tree log recovery.");
6079 wc.replay_dest->log_root = log;
6080 btrfs_record_root_in_trans(trans, wc.replay_dest);
6081 ret = walk_log_tree(trans, log, &wc);
6083 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6084 ret = fixup_inode_link_counts(trans, wc.replay_dest,
6088 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
6089 struct btrfs_root *root = wc.replay_dest;
6091 btrfs_release_path(path);
6094 * We have just replayed everything, and the highest
6095 * objectid of fs roots probably has changed in case
6096 * some inode items got replayed.
6098 * root->objectid_mutex is not acquired as log replay
6099 * can only happen during mount.
6101 ret = btrfs_find_highest_objectid(root,
6102 &root->highest_objectid);
6105 key.offset = found_key.offset - 1;
6106 wc.replay_dest->log_root = NULL;
6107 free_extent_buffer(log->node);
6108 free_extent_buffer(log->commit_root);
6114 if (found_key.offset == 0)
6117 btrfs_release_path(path);
6119 /* step one is to pin it all, step two is to replay just inodes */
6122 wc.process_func = replay_one_buffer;
6123 wc.stage = LOG_WALK_REPLAY_INODES;
6126 /* step three is to replay everything */
6127 if (wc.stage < LOG_WALK_REPLAY_ALL) {
6132 btrfs_free_path(path);
6134 /* step 4: commit the transaction, which also unpins the blocks */
6135 ret = btrfs_commit_transaction(trans);
6139 free_extent_buffer(log_root_tree->node);
6140 log_root_tree->log_root = NULL;
6141 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
6142 kfree(log_root_tree);
6147 btrfs_end_transaction(wc.trans);
6148 btrfs_free_path(path);
6153 * there are some corner cases where we want to force a full
6154 * commit instead of allowing a directory to be logged.
6156 * They revolve around files that were unlinked from the directory, and
6157 * this function updates the parent directory so that a full commit is
6158 * properly done if it is fsync'd later after the unlinks are done.
6160 * Must be called before the unlink operations (updates to the subvolume tree,
6161 * inodes, etc) are done.
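*
* Both this function and btrfs_record_snapshot_destroy() below work the
* same way: they record the current transaction id in last_unlink_trans
* under the log_mutex, so that a later fsync within the same transaction
* knows it cannot rely on the log alone.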
6163 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
6164 struct btrfs_inode *dir, struct btrfs_inode *inode,
6168 * when we're logging a file, if it hasn't been renamed
6169 * or unlinked, and its inode is fully committed on disk,
6170 * we don't have to worry about walking up the directory chain
6171 * to log its parents.
6173 * So, we use the last_unlink_trans field to put this transid
6174 * into the file. When the file is logged we check it and
6175 * don't log the parents if the file is fully on disk.
6177 mutex_lock(&inode->log_mutex);
6178 inode->last_unlink_trans = trans->transid;
6179 mutex_unlock(&inode->log_mutex);
6182 * if this directory was already logged any new
6183 * names for this file/dir will get recorded
6186 if (dir->logged_trans == trans->transid)
6190 * if the inode we're about to unlink was logged,
6191 * the log will be properly updated for any new names
6193 if (inode->logged_trans == trans->transid)
6197 * when renaming files across directories, if the directory
6198 * we're unlinking from gets fsync'd later on, there's
6199 * no way to find the destination directory later and fsync it
6200 * properly. So, we have to be conservative and force commits
6201 * so the new name gets discovered.
6206 /* we can safely do the unlink without any special recording */
6210 mutex_lock(&dir->log_mutex);
6211 dir->last_unlink_trans = trans->transid;
6212 mutex_unlock(&dir->log_mutex);
6216 * Make sure that if someone attempts to fsync the parent directory of a deleted
6217 * snapshot, it ends up triggering a transaction commit. This is to guarantee
6218 * that after replaying the log tree of the parent directory's root we will not
6219 * see the snapshot anymore and at log replay time we will not see any log tree
6220 * corresponding to the deleted snapshot's root, which could lead to replaying
6221 * it after replaying the log tree of the parent directory (which would replay
6222 * the snapshot delete operation).
6224 * Must be called before the actual snapshot destroy operation (updates to the
6225 * parent root and tree of tree roots trees, etc) are done.
6227 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
6228 struct btrfs_inode *dir)
6230 mutex_lock(&dir->log_mutex);
6231 dir->last_unlink_trans = trans->transid;
6232 mutex_unlock(&dir->log_mutex);
6236 * Call this after adding a new name for a file and it will properly
6237 * update the log to reflect the new name.
6239 * @ctx cannot be NULL when @sync_log is false, and should be NULL when it's
6240 * true (because it's not used).
6242 * Return value depends on whether @sync_log is true or false.
6243 * When true: returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6244 * committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
6246 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
6247 * sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
6248 * or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
6249 * committed (without attempting to sync the log).
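*
* As an illustration only (not an actual caller in this file), a caller
* passing @sync_log == false is expected to act on the return value
* roughly like this:
*
*	ret = btrfs_log_new_name(trans, inode, old_dir, parent, false, &ctx);
*	if (ret == BTRFS_NEED_TRANS_COMMIT)
*		commit the whole transaction;
*	else if (ret == BTRFS_NEED_LOG_SYNC)
*		sync the log (btrfs_sync_log()) once it is safe to do so;
*	else
*		nothing else is needed (BTRFS_DONT_NEED_LOG_SYNC);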
6251 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
6252 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
6253 struct dentry *parent,
6254 bool sync_log, struct btrfs_log_ctx *ctx)
6256 struct btrfs_fs_info *fs_info = trans->fs_info;
6260 * this will force the logging code to walk the dentry chain
6263 if (!S_ISDIR(inode->vfs_inode.i_mode))
6264 inode->last_unlink_trans = trans->transid;
6267 * if this inode hasn't been logged and directory we're renaming it
6268 * from hasn't been logged, we don't need to log it
6270 if (inode->logged_trans <= fs_info->last_trans_committed &&
6271 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
6272 return sync_log ? BTRFS_DONT_NEED_TRANS_COMMIT :
6273 BTRFS_DONT_NEED_LOG_SYNC;
6276 struct btrfs_log_ctx ctx2;
6278 btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
6279 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6280 LOG_INODE_EXISTS, &ctx2);
6281 if (ret == BTRFS_NO_LOG_SYNC)
6282 return BTRFS_DONT_NEED_TRANS_COMMIT;
6284 return BTRFS_NEED_TRANS_COMMIT;
6286 ret = btrfs_sync_log(trans, inode->root, &ctx2);
6288 return BTRFS_NEED_TRANS_COMMIT;
6289 return BTRFS_DONT_NEED_TRANS_COMMIT;
6293 ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
6294 LOG_INODE_EXISTS, ctx);
6295 if (ret == BTRFS_NO_LOG_SYNC)
6296 return BTRFS_DONT_NEED_LOG_SYNC;
6298 return BTRFS_NEED_TRANS_COMMIT;
6300 return BTRFS_NEED_LOG_SYNC;