1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2008 Oracle. All rights reserved.
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/blkdev.h>
9 #include <linux/list_sort.h>
10 #include <linux/iversion.h>
15 #include "print-tree.h"
17 #include "compression.h"
19 #include "inode-map.h"
21 /* magic values for the inode_only field in btrfs_log_inode:
23 * LOG_INODE_ALL means to log everything
24 * LOG_INODE_EXISTS means to log just enough to recreate the inode
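* LOG_OTHER_INODE means we are logging an inode other than the one the
* fsync was issued against (a conflicting inode found while logging)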
27 #define LOG_INODE_ALL 0
28 #define LOG_INODE_EXISTS 1
29 #define LOG_OTHER_INODE 2
32 * directory trouble cases
34 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
35 * log, we must force a full commit before doing an fsync of the directory
36 * where the unlink was done.
37 * ---> record transid of last unlink/rename per directory
41 rename foo/some_dir foo2/some_dir
42 mkdir foo/some_dir
43 fsync foo/some_dir/some_file
45 * The fsync above will unlink the original some_dir without recording
46 * it in its new location (foo2). After a crash, some_dir will be gone
47 * unless the fsync of some_file forces a full commit
49 * 2) we must log any new names for any file or dir that is in the fsync
50 * log. ---> check inode while renaming/linking.
52 * 2a) we must log any new names for any file or dir during rename
53 * when the directory they are being removed from was logged.
54 * ---> check inode and old parent dir during rename
56 * 2a is actually the more important variant. Without the extra logging
57 * a crash might unlink the old name without recreating the new one
59 * 3) after a crash, we must go through any directories with a link count
60 * of zero and redo the rm -rf
67 * The directory f1 was fully removed from the FS, but fsync was never
68 * called on f1, only its parent dir. After a crash the rm -rf must
69 * be replayed. This must be able to recurse down the entire
70 * directory tree. The inode link count fixup code takes care of the rest.
75 * stages for the tree walking. The first
76 * stage (0) is to only pin down the blocks we find,
77 * the second stage (1) is to make sure that all the inodes
78 * we find in the log are created in the subvolume.
80 * The last stage is to deal with directories and links and extents
81 * and all the other fun semantics
83 #define LOG_WALK_PIN_ONLY 0
84 #define LOG_WALK_REPLAY_INODES 1
85 #define LOG_WALK_REPLAY_DIR_INDEX 2
86 #define LOG_WALK_REPLAY_ALL 3
88 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
89 struct btrfs_root *root, struct btrfs_inode *inode,
93 struct btrfs_log_ctx *ctx);
94 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
95 struct btrfs_root *root,
96 struct btrfs_path *path, u64 objectid);
97 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
98 struct btrfs_root *root,
99 struct btrfs_root *log,
100 struct btrfs_path *path,
101 u64 dirid, int del_all);
104 * tree logging is a special write ahead log used to make sure that
105 * fsyncs and O_SYNCs can happen without doing full tree commits.
107 * Full tree commits are expensive because they require commonly
108 * modified blocks to be recowed, creating many dirty pages in the
109 * extent tree and a 4x-6x higher write load than ext3.
111 * Instead of doing a tree commit on every fsync, we use the
112 * key ranges and transaction ids to find items for a given file or directory
113 * that have changed in this transaction. Those items are copied into
114 * a special tree (one per subvolume root), that tree is written to disk
115 * and then the fsync is considered complete.
117 * After a crash, items are copied out of the log-tree back into the
118 * subvolume tree. Any file data extents found are recorded in the extent
119 * allocation tree, and the log-tree freed.
121 * The log tree is read three times: once to pin down all the extents it is
122 * using in ram, once to create all the inodes logged in the tree
123 * and once to do all the other items.
127 * start a sub transaction and setup the log tree
128 * this increments the log tree writer count to make the people
129 * syncing the tree wait for us to finish
131 static int start_log_trans(struct btrfs_trans_handle *trans,
132 struct btrfs_root *root,
133 struct btrfs_log_ctx *ctx)
135 struct btrfs_fs_info *fs_info = root->fs_info;
138 mutex_lock(&root->log_mutex);
140 if (root->log_root) {
141 if (btrfs_need_log_full_commit(fs_info, trans)) {
146 if (!root->log_start_pid) {
147 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
148 root->log_start_pid = current->pid;
149 } else if (root->log_start_pid != current->pid) {
150 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
153 mutex_lock(&fs_info->tree_log_mutex);
154 if (!fs_info->log_root_tree)
155 ret = btrfs_init_log_root_tree(trans, fs_info);
156 mutex_unlock(&fs_info->tree_log_mutex);
160 ret = btrfs_add_log_tree(trans, root);
164 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
165 root->log_start_pid = current->pid;
168 atomic_inc(&root->log_batch);
169 atomic_inc(&root->log_writers);
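/*
* Two log transactions can be in flight at a time (the one being synced
* and the next one being filled), so waiters are tracked per transaction
* in log_ctxs[log_transid % 2].
*/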
171 int index = root->log_transid % 2;
172 list_add_tail(&ctx->list, &root->log_ctxs[index]);
173 ctx->log_transid = root->log_transid;
177 mutex_unlock(&root->log_mutex);
182 * returns 0 if there was a log transaction running and we were able
183 * to join, or returns -ENOENT if there was no transaction running
186 static int join_running_log_trans(struct btrfs_root *root)
194 mutex_lock(&root->log_mutex);
195 if (root->log_root) {
197 atomic_inc(&root->log_writers);
199 mutex_unlock(&root->log_mutex);
204 * This either makes the current running log transaction wait
205 * until you call btrfs_end_log_trans() or it makes any future
206 * log transactions wait until you call btrfs_end_log_trans()
208 int btrfs_pin_log_trans(struct btrfs_root *root)
212 mutex_lock(&root->log_mutex);
213 atomic_inc(&root->log_writers);
214 mutex_unlock(&root->log_mutex);
219 * indicate we're done making changes to the log tree
220 * and wake up anyone waiting to do a sync
222 void btrfs_end_log_trans(struct btrfs_root *root)
224 if (atomic_dec_and_test(&root->log_writers)) {
225 /* atomic_dec_and_test implies a barrier */
226 cond_wake_up_nomb(&root->log_writer_wait);
232 * the walk control struct is used to pass state down the chain when
233 * processing the log tree. The stage field tells us which part
234 * of the log tree processing we are currently doing. The others
235 * are state fields used for that specific part
237 struct walk_control {
238 /* should we free the extent on disk when done? This is used
239 * at transaction commit time while freeing a log tree
243 /* should we write out the extent buffer? This is used
244 * while flushing the log tree to disk during a sync
248 /* should we wait for the extent buffer io to finish? Also used
249 * while flushing the log tree to disk for a sync
253 /* pin only walk, we record which extents on disk belong to the
258 /* what stage of the replay code we're currently in */
261 /* the root we are currently replaying */
262 struct btrfs_root *replay_dest;
264 /* the trans handle for the current replay */
265 struct btrfs_trans_handle *trans;
267 /* the function that gets used to process blocks we find in the
268 * tree. Note the extent_buffer might not be up to date when it is
269 * passed in, and it must be checked or read if you need the data inside it
272 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
273 struct walk_control *wc, u64 gen, int level);
277 * process_func used to pin down extents, write them or wait on them
279 static int process_one_buffer(struct btrfs_root *log,
280 struct extent_buffer *eb,
281 struct walk_control *wc, u64 gen, int level)
283 struct btrfs_fs_info *fs_info = log->fs_info;
287 * If this fs is mixed then we need to be able to process the leaves to
288 * pin down any logged extents, so we have to read the block.
290 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
291 ret = btrfs_read_buffer(eb, gen, level, NULL);
297 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start,
300 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
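/*
* Only leaves carry file extent items, so only level 0 blocks can
* reference logged data extents that need to be excluded from the
* free space caches during replay.
*/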
301 if (wc->pin && btrfs_header_level(eb) == 0)
302 ret = btrfs_exclude_logged_extents(fs_info, eb);
304 btrfs_write_tree_block(eb);
306 btrfs_wait_tree_block_writeback(eb);
312 * Item overwrite used by replay and tree logging. eb, slot and key all refer
313 * to the src data we are copying out.
315 * root is the tree we are copying into, and path is a scratch
316 * path for use in this function (it should be released on entry and
317 * will be released on exit).
319 * If the key is already in the destination tree the existing item is
320 * overwritten. If the existing item isn't big enough, it is extended.
321 * If it is too large, it is truncated.
323 * If the key isn't in the destination yet, a new item is inserted.
325 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
326 struct btrfs_root *root,
327 struct btrfs_path *path,
328 struct extent_buffer *eb, int slot,
329 struct btrfs_key *key)
331 struct btrfs_fs_info *fs_info = root->fs_info;
334 u64 saved_i_size = 0;
335 int save_old_i_size = 0;
336 unsigned long src_ptr;
337 unsigned long dst_ptr;
338 int overwrite_root = 0;
339 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
341 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
344 item_size = btrfs_item_size_nr(eb, slot);
345 src_ptr = btrfs_item_ptr_offset(eb, slot);
347 /* look for the key in the destination tree */
348 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
355 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
357 if (dst_size != item_size)
360 if (item_size == 0) {
361 btrfs_release_path(path);
364 dst_copy = kmalloc(item_size, GFP_NOFS);
365 src_copy = kmalloc(item_size, GFP_NOFS);
366 if (!dst_copy || !src_copy) {
367 btrfs_release_path(path);
373 read_extent_buffer(eb, src_copy, src_ptr, item_size);
375 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
376 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
378 ret = memcmp(dst_copy, src_copy, item_size);
383 * they have the same contents, just return, this saves
384 * us from cowing blocks in the destination tree and doing
385 * extra writes that may not have been done by a previous sync.
389 btrfs_release_path(path);
394 * We need to load the old nbytes into the inode so when we
395 * replay the extents we've logged we get the right nbytes.
398 struct btrfs_inode_item *item;
402 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
403 struct btrfs_inode_item);
404 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
405 item = btrfs_item_ptr(eb, slot,
406 struct btrfs_inode_item);
407 btrfs_set_inode_nbytes(eb, item, nbytes);
410 * If this is a directory we need to reset the i_size to
411 * 0 so that we can set it up properly when replaying
412 * the rest of the items in this log.
414 mode = btrfs_inode_mode(eb, item);
416 btrfs_set_inode_size(eb, item, 0);
418 } else if (inode_item) {
419 struct btrfs_inode_item *item;
423 * New inode, set nbytes to 0 so that the nbytes comes out
424 * properly when we replay the extents.
426 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
427 btrfs_set_inode_nbytes(eb, item, 0);
430 * If this is a directory we need to reset the i_size to 0 so
431 * that we can set it up properly when replaying the rest of
432 * the items in this log.
434 mode = btrfs_inode_mode(eb, item);
436 btrfs_set_inode_size(eb, item, 0);
439 btrfs_release_path(path);
440 /* try to insert the key into the destination tree */
441 path->skip_release_on_error = 1;
442 ret = btrfs_insert_empty_item(trans, root, path,
444 path->skip_release_on_error = 0;
446 /* make sure any existing item is the correct size */
447 if (ret == -EEXIST || ret == -EOVERFLOW) {
449 found_size = btrfs_item_size_nr(path->nodes[0],
451 if (found_size > item_size)
452 btrfs_truncate_item(fs_info, path, item_size, 1);
453 else if (found_size < item_size)
454 btrfs_extend_item(fs_info, path,
455 item_size - found_size);
459 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
462 /* don't overwrite an existing inode if the generation number
463 * was logged as zero. This is done when the tree logging code
464 * is just logging an inode to make sure it exists after recovery.
466 * Also, don't overwrite i_size on directories during replay.
467 * log replay inserts and removes directory items based on the
468 * state of the tree found in the subvolume, and i_size is modified as it goes.
471 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
472 struct btrfs_inode_item *src_item;
473 struct btrfs_inode_item *dst_item;
475 src_item = (struct btrfs_inode_item *)src_ptr;
476 dst_item = (struct btrfs_inode_item *)dst_ptr;
478 if (btrfs_inode_generation(eb, src_item) == 0) {
479 struct extent_buffer *dst_eb = path->nodes[0];
480 const u64 ino_size = btrfs_inode_size(eb, src_item);
483 * For regular files an ino_size == 0 is used only when
484 * logging that an inode exists, as part of a directory
485 * fsync, and the inode wasn't fsynced before. In this
486 * case don't set the size of the inode in the fs/subvol
487 * tree, otherwise we would be throwing valid data away.
489 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
490 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
492 struct btrfs_map_token token;
494 btrfs_init_map_token(&token);
495 btrfs_set_token_inode_size(dst_eb, dst_item,
501 if (overwrite_root &&
502 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
503 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
505 saved_i_size = btrfs_inode_size(path->nodes[0],
510 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
513 if (save_old_i_size) {
514 struct btrfs_inode_item *dst_item;
515 dst_item = (struct btrfs_inode_item *)dst_ptr;
516 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
519 /* make sure the generation is filled in */
520 if (key->type == BTRFS_INODE_ITEM_KEY) {
521 struct btrfs_inode_item *dst_item;
522 dst_item = (struct btrfs_inode_item *)dst_ptr;
523 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
524 btrfs_set_inode_generation(path->nodes[0], dst_item,
529 btrfs_mark_buffer_dirty(path->nodes[0]);
530 btrfs_release_path(path);
535 * simple helper to read an inode off the disk from a given root
536 * This can only be called for subvolume roots and not for the log
538 static noinline struct inode *read_one_inode(struct btrfs_root *root,
541 struct btrfs_key key;
544 key.objectid = objectid;
545 key.type = BTRFS_INODE_ITEM_KEY;
547 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
550 } else if (is_bad_inode(inode)) {
557 /* replays a single extent in 'eb' at 'slot' with 'key' into the
558 * subvolume 'root'. path is released on entry and should be released on exit.
561 * extents in the log tree have not been allocated out of the extent
562 * tree yet. So, this completes the allocation, taking a reference
563 * as required if the extent already exists or creating a new extent
564 * if it isn't in the extent allocation tree yet.
566 * The extent is inserted into the file, dropping any existing extents
567 * from the file that overlap the new one.
569 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
570 struct btrfs_root *root,
571 struct btrfs_path *path,
572 struct extent_buffer *eb, int slot,
573 struct btrfs_key *key)
575 struct btrfs_fs_info *fs_info = root->fs_info;
578 u64 start = key->offset;
580 struct btrfs_file_extent_item *item;
581 struct inode *inode = NULL;
585 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
586 found_type = btrfs_file_extent_type(eb, item);
588 if (found_type == BTRFS_FILE_EXTENT_REG ||
589 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
590 nbytes = btrfs_file_extent_num_bytes(eb, item);
591 extent_end = start + nbytes;
594 * We don't add to the inode's nbytes if we are prealloc or a hole.
597 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
599 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
600 size = btrfs_file_extent_ram_bytes(eb, item);
601 nbytes = btrfs_file_extent_ram_bytes(eb, item);
602 extent_end = ALIGN(start + size,
603 fs_info->sectorsize);
609 inode = read_one_inode(root, key->objectid);
616 * first check to see if we already have this extent in the
617 * file. This must be done before the btrfs_drop_extents run
618 * so we don't try to drop this extent.
620 ret = btrfs_lookup_file_extent(trans, root, path,
621 btrfs_ino(BTRFS_I(inode)), start, 0);
624 (found_type == BTRFS_FILE_EXTENT_REG ||
625 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
626 struct btrfs_file_extent_item cmp1;
627 struct btrfs_file_extent_item cmp2;
628 struct btrfs_file_extent_item *existing;
629 struct extent_buffer *leaf;
631 leaf = path->nodes[0];
632 existing = btrfs_item_ptr(leaf, path->slots[0],
633 struct btrfs_file_extent_item);
635 read_extent_buffer(eb, &cmp1, (unsigned long)item,
637 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
641 * we already have a pointer to this exact extent,
642 * we don't have to do anything
644 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
645 btrfs_release_path(path);
649 btrfs_release_path(path);
651 /* drop any overlapping extents */
652 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
656 if (found_type == BTRFS_FILE_EXTENT_REG ||
657 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
659 unsigned long dest_offset;
660 struct btrfs_key ins;
662 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
663 btrfs_fs_incompat(fs_info, NO_HOLES))
666 ret = btrfs_insert_empty_item(trans, root, path, key,
670 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
672 copy_extent_buffer(path->nodes[0], eb, dest_offset,
673 (unsigned long)item, sizeof(*item));
675 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
676 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
677 ins.type = BTRFS_EXTENT_ITEM_KEY;
678 offset = key->offset - btrfs_file_extent_offset(eb, item);
681 * Manually record the dirty extent, as here we did a shallow
682 * file extent item copy and skipped the normal backref update,
683 * modifying the extent tree all by ourselves.
684 * So we need to manually record the dirty extent for qgroups,
685 * as the owner of the file extent changed from the log tree
686 * (doesn't affect qgroups) to the fs/file tree (which does affect qgroups)
688 ret = btrfs_qgroup_trace_extent(trans, fs_info,
689 btrfs_file_extent_disk_bytenr(eb, item),
690 btrfs_file_extent_disk_num_bytes(eb, item),
695 if (ins.objectid > 0) {
698 LIST_HEAD(ordered_sums);
700 * is this extent already allocated in the extent
701 * allocation tree? If so, just add a reference
703 ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
706 ret = btrfs_inc_extent_ref(trans, root,
707 ins.objectid, ins.offset,
708 0, root->root_key.objectid,
709 key->objectid, offset);
714 * insert the extent pointer in the extent allocation tree
717 ret = btrfs_alloc_logged_file_extent(trans,
718 root->root_key.objectid,
719 key->objectid, offset, &ins);
723 btrfs_release_path(path);
725 if (btrfs_file_extent_compression(eb, item)) {
726 csum_start = ins.objectid;
727 csum_end = csum_start + ins.offset;
729 csum_start = ins.objectid +
730 btrfs_file_extent_offset(eb, item);
731 csum_end = csum_start +
732 btrfs_file_extent_num_bytes(eb, item);
735 ret = btrfs_lookup_csums_range(root->log_root,
736 csum_start, csum_end - 1,
741 * Now delete all existing csums in the csum root that
742 * cover our range. We do this because we can have an
743 * extent that is completely referenced by one file
744 * extent item and partially referenced by another
745 * file extent item (like after using the clone or
746 * extent_same ioctls). In this case if we end up doing
747 * the replay of the one that partially references the
748 * extent first, and we do not do the csum deletion
749 * below, we can get 2 csum items in the csum tree that
750 * overlap each other. For example, imagine our log has
751 * the two following file extent items:
753 * key (257 EXTENT_DATA 409600)
754 * extent data disk byte 12845056 nr 102400
755 * extent data offset 20480 nr 20480 ram 102400
757 * key (257 EXTENT_DATA 819200)
758 * extent data disk byte 12845056 nr 102400
759 * extent data offset 0 nr 102400 ram 102400
761 * Where the second one fully references the 100K extent
762 * that starts at disk byte 12845056, and the log tree
763 * has a single csum item that covers the entire range
766 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
768 * After the first file extent item is replayed, the
769 * csum tree gets the following csum item:
771 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
773 * Which covers the 20K sub-range starting at offset 20K
774 * of our extent. Now when we replay the second file
775 * extent item, if we do not delete existing csum items
776 * that cover any of its blocks, we end up getting two
777 * csum items in our csum tree that overlap each other:
779 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
780 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
782 * Which is a problem, because after this anyone trying
783 * to look up the checksum of any block of our
784 * extent starting at an offset of 40K or higher, will
785 * end up looking at the second csum item only, which
786 * does not contain the checksum for any block starting
787 * at offset 40K or higher of our extent.
789 while (!list_empty(&ordered_sums)) {
790 struct btrfs_ordered_sum *sums;
791 sums = list_entry(ordered_sums.next,
792 struct btrfs_ordered_sum,
795 ret = btrfs_del_csums(trans, fs_info,
799 ret = btrfs_csum_file_blocks(trans,
800 fs_info->csum_root, sums);
801 list_del(&sums->list);
807 btrfs_release_path(path);
809 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
810 /* inline extents are easy, we just overwrite them */
811 ret = overwrite_item(trans, root, path, eb, slot, key);
816 inode_add_bytes(inode, nbytes);
818 ret = btrfs_update_inode(trans, root, inode);
826 * when cleaning up conflicts between the directory names in the
827 * subvolume, directory names in the log and directory names in the
828 * inode back references, we may have to unlink inodes from directories.
830 * This is a helper function to do the unlink of a specific directory entry.
833 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
834 struct btrfs_root *root,
835 struct btrfs_path *path,
836 struct btrfs_inode *dir,
837 struct btrfs_dir_item *di)
842 struct extent_buffer *leaf;
843 struct btrfs_key location;
846 leaf = path->nodes[0];
848 btrfs_dir_item_key_to_cpu(leaf, di, &location);
849 name_len = btrfs_dir_name_len(leaf, di);
850 name = kmalloc(name_len, GFP_NOFS);
854 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
855 btrfs_release_path(path);
857 inode = read_one_inode(root, location.objectid);
863 ret = link_to_fixup_dir(trans, root, path, location.objectid);
867 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
872 ret = btrfs_run_delayed_items(trans);
880 * helper function to see if a given name and sequence number found
881 * in an inode back reference are already in a directory and correctly
882 * point to this inode
884 static noinline int inode_in_dir(struct btrfs_root *root,
885 struct btrfs_path *path,
886 u64 dirid, u64 objectid, u64 index,
887 const char *name, int name_len)
889 struct btrfs_dir_item *di;
890 struct btrfs_key location;
893 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
894 index, name, name_len, 0);
895 if (di && !IS_ERR(di)) {
896 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
897 if (location.objectid != objectid)
901 btrfs_release_path(path);
903 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
904 if (di && !IS_ERR(di)) {
905 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
906 if (location.objectid != objectid)
912 btrfs_release_path(path);
917 * helper function to check a log tree for a named back reference in
918 * an inode. This is used to decide if a back reference that is
919 * found in the subvolume conflicts with what we find in the log.
921 * inode backreferences may have multiple refs in a single item,
922 * during replay we process one reference at a time, and we don't
923 * want to delete valid links to a file from the subvolume if that
924 * link is also in the log.
926 static noinline int backref_in_log(struct btrfs_root *log,
927 struct btrfs_key *key,
929 const char *name, int namelen)
931 struct btrfs_path *path;
932 struct btrfs_inode_ref *ref;
934 unsigned long ptr_end;
935 unsigned long name_ptr;
941 path = btrfs_alloc_path();
945 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
949 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
951 if (key->type == BTRFS_INODE_EXTREF_KEY) {
952 if (btrfs_find_name_in_ext_backref(path->nodes[0],
955 name, namelen, NULL))
961 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
962 ptr_end = ptr + item_size;
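/*
* An INODE_REF item packs multiple back references for the same parent
* one after another, each a btrfs_inode_ref header followed by the name,
* so walk the whole item looking for a matching name.
*/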
963 while (ptr < ptr_end) {
964 ref = (struct btrfs_inode_ref *)ptr;
965 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
966 if (found_name_len == namelen) {
967 name_ptr = (unsigned long)(ref + 1);
968 ret = memcmp_extent_buffer(path->nodes[0], name,
975 ptr = (unsigned long)(ref + 1) + found_name_len;
978 btrfs_free_path(path);
982 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
983 struct btrfs_root *root,
984 struct btrfs_path *path,
985 struct btrfs_root *log_root,
986 struct btrfs_inode *dir,
987 struct btrfs_inode *inode,
988 u64 inode_objectid, u64 parent_objectid,
989 u64 ref_index, char *name, int namelen,
995 struct extent_buffer *leaf;
996 struct btrfs_dir_item *di;
997 struct btrfs_key search_key;
998 struct btrfs_inode_extref *extref;
1001 /* Search old style refs */
1002 search_key.objectid = inode_objectid;
1003 search_key.type = BTRFS_INODE_REF_KEY;
1004 search_key.offset = parent_objectid;
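/*
* Old style back references are keyed as
* (inode objectid, INODE_REF, parent directory objectid), so this search
* lands on the item holding every name this inode has in that parent.
*/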
1005 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
1007 struct btrfs_inode_ref *victim_ref;
1009 unsigned long ptr_end;
1011 leaf = path->nodes[0];
1013 /* are we trying to overwrite a back ref for the root directory?
1014 * if so, just jump out, we're done
1016 if (search_key.objectid == search_key.offset)
1019 /* check all the names in this back reference to see
1020 * if they are in the log. if so, we allow them to stay
1021 * otherwise they must be unlinked as a conflict
1023 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1024 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
1025 while (ptr < ptr_end) {
1026 victim_ref = (struct btrfs_inode_ref *)ptr;
1027 victim_name_len = btrfs_inode_ref_name_len(leaf,
1029 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1033 read_extent_buffer(leaf, victim_name,
1034 (unsigned long)(victim_ref + 1),
1037 if (!backref_in_log(log_root, &search_key,
1041 inc_nlink(&inode->vfs_inode);
1042 btrfs_release_path(path);
1044 ret = btrfs_unlink_inode(trans, root, dir, inode,
1045 victim_name, victim_name_len);
1049 ret = btrfs_run_delayed_items(trans);
1057 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
1061 * NOTE: we have searched the root tree and checked the
1062 * corresponding ref, so there is no need to check it again.
1066 btrfs_release_path(path);
1068 /* Same search but for extended refs */
1069 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1070 inode_objectid, parent_objectid, 0,
1072 if (!IS_ERR_OR_NULL(extref)) {
1076 struct inode *victim_parent;
1078 leaf = path->nodes[0];
1080 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1081 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1083 while (cur_offset < item_size) {
1084 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1086 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1088 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1091 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1094 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
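/*
* Extended back references are keyed as
* (inode objectid, INODE_EXTREF, hash(parent objectid, name)), so the
* lookup key has to be rebuilt for each candidate name.
*/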
1097 search_key.objectid = inode_objectid;
1098 search_key.type = BTRFS_INODE_EXTREF_KEY;
1099 search_key.offset = btrfs_extref_hash(parent_objectid,
1103 if (!backref_in_log(log_root, &search_key,
1104 parent_objectid, victim_name,
1107 victim_parent = read_one_inode(root,
1109 if (victim_parent) {
1110 inc_nlink(&inode->vfs_inode);
1111 btrfs_release_path(path);
1113 ret = btrfs_unlink_inode(trans, root,
1114 BTRFS_I(victim_parent),
1119 ret = btrfs_run_delayed_items(
1122 iput(victim_parent);
1131 cur_offset += victim_name_len + sizeof(*extref);
1135 btrfs_release_path(path);
1137 /* look for a conflicting sequence number */
1138 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1139 ref_index, name, namelen, 0);
1140 if (di && !IS_ERR(di)) {
1141 ret = drop_one_dir_item(trans, root, path, dir, di);
1145 btrfs_release_path(path);
1147 /* look for a conflicting name */
1148 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1150 if (di && !IS_ERR(di)) {
1151 ret = drop_one_dir_item(trans, root, path, dir, di);
1155 btrfs_release_path(path);
1160 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1161 u32 *namelen, char **name, u64 *index,
1162 u64 *parent_objectid)
1164 struct btrfs_inode_extref *extref;
1166 extref = (struct btrfs_inode_extref *)ref_ptr;
1168 *namelen = btrfs_inode_extref_name_len(eb, extref);
1169 *name = kmalloc(*namelen, GFP_NOFS);
1173 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1177 *index = btrfs_inode_extref_index(eb, extref);
1178 if (parent_objectid)
1179 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1184 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1185 u32 *namelen, char **name, u64 *index)
1187 struct btrfs_inode_ref *ref;
1189 ref = (struct btrfs_inode_ref *)ref_ptr;
1191 *namelen = btrfs_inode_ref_name_len(eb, ref);
1192 *name = kmalloc(*namelen, GFP_NOFS);
1196 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1199 *index = btrfs_inode_ref_index(eb, ref);
1205 * Take an inode reference item from the log tree and iterate all names from the
1206 * inode reference item in the subvolume tree with the same key (if it exists).
1207 * For any name that is not in the inode reference item from the log tree, do a
1208 * proper unlink of that name (that is, remove its entry from the inode
1209 * reference item and both dir index keys).
1211 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
1212 struct btrfs_root *root,
1213 struct btrfs_path *path,
1214 struct btrfs_inode *inode,
1215 struct extent_buffer *log_eb,
1217 struct btrfs_key *key)
1220 unsigned long ref_ptr;
1221 unsigned long ref_end;
1222 struct extent_buffer *eb;
1225 btrfs_release_path(path);
1226 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
1234 eb = path->nodes[0];
1235 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
1236 ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
1237 while (ref_ptr < ref_end) {
1242 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1243 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1246 parent_id = key->offset;
1247 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1253 if (key->type == BTRFS_INODE_EXTREF_KEY)
1254 ret = btrfs_find_name_in_ext_backref(log_eb, log_slot,
1258 ret = btrfs_find_name_in_backref(log_eb, log_slot, name,
1264 btrfs_release_path(path);
1265 dir = read_one_inode(root, parent_id);
1271 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
1272 inode, name, namelen);
1282 if (key->type == BTRFS_INODE_EXTREF_KEY)
1283 ref_ptr += sizeof(struct btrfs_inode_extref);
1285 ref_ptr += sizeof(struct btrfs_inode_ref);
1289 btrfs_release_path(path);
1294 * replay one inode back reference item found in the log tree.
1295 * eb, slot and key refer to the buffer and key found in the log tree.
1296 * root is the destination we are replaying into, and path is for temp
1297 * use by this function. (it should be released on return).
1299 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1300 struct btrfs_root *root,
1301 struct btrfs_root *log,
1302 struct btrfs_path *path,
1303 struct extent_buffer *eb, int slot,
1304 struct btrfs_key *key)
1306 struct inode *dir = NULL;
1307 struct inode *inode = NULL;
1308 unsigned long ref_ptr;
1309 unsigned long ref_end;
1313 int search_done = 0;
1314 int log_ref_ver = 0;
1315 u64 parent_objectid;
1318 int ref_struct_size;
1320 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1321 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1323 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1324 struct btrfs_inode_extref *r;
1326 ref_struct_size = sizeof(struct btrfs_inode_extref);
1328 r = (struct btrfs_inode_extref *)ref_ptr;
1329 parent_objectid = btrfs_inode_extref_parent(eb, r);
1331 ref_struct_size = sizeof(struct btrfs_inode_ref);
1332 parent_objectid = key->offset;
1334 inode_objectid = key->objectid;
1337 * it is possible that we didn't log all the parent directories
1338 * for a given inode. If we don't find the dir, just don't
1339 * copy the back ref in. The link count fixup code will take care of the rest.
1342 dir = read_one_inode(root, parent_objectid);
1348 inode = read_one_inode(root, inode_objectid);
1354 while (ref_ptr < ref_end) {
1356 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1357 &ref_index, &parent_objectid);
1359 * parent object can change from one array item to another.
1363 dir = read_one_inode(root, parent_objectid);
1369 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1375 /* if we already have a perfect match, we're done */
1376 if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
1377 btrfs_ino(BTRFS_I(inode)), ref_index,
1380 * look for a conflicting back reference in the
1381 * metadata. if we find one we have to unlink that name
1382 * of the file before we add our new link. Later on, we
1383 * overwrite any existing back reference, and we don't
1384 * want to create dangling pointers in the directory.
1388 ret = __add_inode_ref(trans, root, path, log,
1393 ref_index, name, namelen,
1402 /* insert our name */
1403 ret = btrfs_add_link(trans, BTRFS_I(dir),
1405 name, namelen, 0, ref_index);
1409 btrfs_update_inode(trans, root, inode);
1412 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1422 * Before we overwrite the inode reference item in the subvolume tree
1423 * with the item from the log tree, we must unlink all names from the
1424 * parent directory that are in the subvolume's tree inode reference
1425 * item, otherwise we end up with an inconsistent subvolume tree where
1426 * dir index entries exist for a name but there is no inode reference
1427 * item with the same name.
1429 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
1434 /* finally write the back reference in the inode */
1435 ret = overwrite_item(trans, root, path, eb, slot, key);
1437 btrfs_release_path(path);
1444 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1445 struct btrfs_root *root, u64 ino)
1449 ret = btrfs_insert_orphan_item(trans, root, ino);
1456 static int count_inode_extrefs(struct btrfs_root *root,
1457 struct btrfs_inode *inode, struct btrfs_path *path)
1461 unsigned int nlink = 0;
1464 u64 inode_objectid = btrfs_ino(inode);
1467 struct btrfs_inode_extref *extref;
1468 struct extent_buffer *leaf;
1471 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1476 leaf = path->nodes[0];
1477 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1478 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1481 while (cur_offset < item_size) {
1482 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1483 name_len = btrfs_inode_extref_name_len(leaf, extref);
1487 cur_offset += name_len + sizeof(*extref);
1491 btrfs_release_path(path);
1493 btrfs_release_path(path);
1495 if (ret < 0 && ret != -ENOENT)
1500 static int count_inode_refs(struct btrfs_root *root,
1501 struct btrfs_inode *inode, struct btrfs_path *path)
1504 struct btrfs_key key;
1505 unsigned int nlink = 0;
1507 unsigned long ptr_end;
1509 u64 ino = btrfs_ino(inode);
1512 key.type = BTRFS_INODE_REF_KEY;
1513 key.offset = (u64)-1;
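/*
* Start from the highest possible INODE_REF offset for this inode and
* walk backwards, counting every name stored in every ref item found.
*/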
1516 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1520 if (path->slots[0] == 0)
1525 btrfs_item_key_to_cpu(path->nodes[0], &key,
1527 if (key.objectid != ino ||
1528 key.type != BTRFS_INODE_REF_KEY)
1530 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1531 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1533 while (ptr < ptr_end) {
1534 struct btrfs_inode_ref *ref;
1536 ref = (struct btrfs_inode_ref *)ptr;
1537 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1539 ptr = (unsigned long)(ref + 1) + name_len;
1543 if (key.offset == 0)
1545 if (path->slots[0] > 0) {
1550 btrfs_release_path(path);
1552 btrfs_release_path(path);
1558 * There are a few corners where the link count of the file can't
1559 * be properly maintained during replay. So, instead of adding
1560 * lots of complexity to the log code, we just scan the backrefs
1561 * for any file that has been through replay.
1563 * The scan will update the link count on the inode to reflect the
1564 * number of back refs found. If it goes down to zero, the iput
1565 * will free the inode.
1567 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1568 struct btrfs_root *root,
1569 struct inode *inode)
1571 struct btrfs_path *path;
1574 u64 ino = btrfs_ino(BTRFS_I(inode));
1576 path = btrfs_alloc_path();
1580 ret = count_inode_refs(root, BTRFS_I(inode), path);
1586 ret = count_inode_extrefs(root, BTRFS_I(inode), path);
1594 if (nlink != inode->i_nlink) {
1595 set_nlink(inode, nlink);
1596 btrfs_update_inode(trans, root, inode);
1598 BTRFS_I(inode)->index_cnt = (u64)-1;
1600 if (inode->i_nlink == 0) {
1601 if (S_ISDIR(inode->i_mode)) {
1602 ret = replay_dir_deletes(trans, root, NULL, path,
1607 ret = insert_orphan_item(trans, root, ino);
1611 btrfs_free_path(path);
1615 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1616 struct btrfs_root *root,
1617 struct btrfs_path *path)
1620 struct btrfs_key key;
1621 struct inode *inode;
1623 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1624 key.type = BTRFS_ORPHAN_ITEM_KEY;
1625 key.offset = (u64)-1;
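/*
* Fixup entries recorded by link_to_fixup_dir() are keyed as
* (BTRFS_TREE_LOG_FIXUP_OBJECTID, ORPHAN_ITEM, inode number), so walk
* them from the highest offset down, fixing one inode per iteration.
*/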
1627 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1632 if (path->slots[0] == 0)
1637 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1638 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1639 key.type != BTRFS_ORPHAN_ITEM_KEY)
1642 ret = btrfs_del_item(trans, root, path);
1646 btrfs_release_path(path);
1647 inode = read_one_inode(root, key.offset);
1651 ret = fixup_inode_link_count(trans, root, inode);
1657 * fixup on a directory may create new entries,
1658 * make sure we always look for the highest possible offset
1661 key.offset = (u64)-1;
1665 btrfs_release_path(path);
1671 * record a given inode in the fixup dir so we can check its link
1672 * count when replay is done. The link count is incremented here
1673 * so the inode won't go away until we check it
1675 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1676 struct btrfs_root *root,
1677 struct btrfs_path *path,
1680 struct btrfs_key key;
1682 struct inode *inode;
1684 inode = read_one_inode(root, objectid);
1688 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1689 key.type = BTRFS_ORPHAN_ITEM_KEY;
1690 key.offset = objectid;
1692 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1694 btrfs_release_path(path);
1696 if (!inode->i_nlink)
1697 set_nlink(inode, 1);
1700 ret = btrfs_update_inode(trans, root, inode);
1701 } else if (ret == -EEXIST) {
1704 BUG(); /* Logic Error */
1712 * when replaying the log for a directory, we only insert names
1713 * for inodes that actually exist. This means an fsync on a directory
1714 * does not implicitly fsync all the new files in it
1716 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1717 struct btrfs_root *root,
1718 u64 dirid, u64 index,
1719 char *name, int name_len,
1720 struct btrfs_key *location)
1722 struct inode *inode;
1726 inode = read_one_inode(root, location->objectid);
1730 dir = read_one_inode(root, dirid);
1736 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
1737 name_len, 1, index);
1739 /* FIXME, put inode into FIXUP list */
1747 * Return true if an inode reference exists in the log for the given name,
1748 * inode and parent inode.
1750 static bool name_in_log_ref(struct btrfs_root *log_root,
1751 const char *name, const int name_len,
1752 const u64 dirid, const u64 ino)
1754 struct btrfs_key search_key;
1756 search_key.objectid = ino;
1757 search_key.type = BTRFS_INODE_REF_KEY;
1758 search_key.offset = dirid;
1759 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1762 search_key.type = BTRFS_INODE_EXTREF_KEY;
1763 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1764 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1771 * take a single entry in a log directory item and replay it into the subvolume.
1774 * if a conflicting item exists in the subdirectory already,
1775 * the inode it points to is unlinked and put into the link count fix up tree.
1778 * If a name from the log points to a file or directory that does
1779 * not exist in the FS, it is skipped. fsyncs on directories
1780 * do not force down inodes inside that directory, just changes to the
1781 * names or unlinks in a directory.
1783 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
1784 * non-existing inode) and 1 if the name was replayed.
1786 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1787 struct btrfs_root *root,
1788 struct btrfs_path *path,
1789 struct extent_buffer *eb,
1790 struct btrfs_dir_item *di,
1791 struct btrfs_key *key)
1795 struct btrfs_dir_item *dst_di;
1796 struct btrfs_key found_key;
1797 struct btrfs_key log_key;
1802 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1803 bool name_added = false;
1805 dir = read_one_inode(root, key->objectid);
1809 name_len = btrfs_dir_name_len(eb, di);
1810 name = kmalloc(name_len, GFP_NOFS);
1816 log_type = btrfs_dir_type(eb, di);
1817 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1820 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1821 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1826 btrfs_release_path(path);
1828 if (key->type == BTRFS_DIR_ITEM_KEY) {
1829 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1831 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1832 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1841 if (IS_ERR_OR_NULL(dst_di)) {
1842 /* we need a sequence number to insert, so we only
1843 * do inserts for the BTRFS_DIR_INDEX_KEY types
1845 if (key->type != BTRFS_DIR_INDEX_KEY)
1850 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1851 /* the existing item matches the logged item */
1852 if (found_key.objectid == log_key.objectid &&
1853 found_key.type == log_key.type &&
1854 found_key.offset == log_key.offset &&
1855 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1856 update_size = false;
1861 * don't drop the conflicting directory entry if the inode
1862 * for the new entry doesn't exist
1867 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
1871 if (key->type == BTRFS_DIR_INDEX_KEY)
1874 btrfs_release_path(path);
1875 if (!ret && update_size) {
1876 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
1877 ret = btrfs_update_inode(trans, root, dir);
1881 if (!ret && name_added)
1886 if (name_in_log_ref(root->log_root, name, name_len,
1887 key->objectid, log_key.objectid)) {
1888 /* The dentry will be added later. */
1890 update_size = false;
1893 btrfs_release_path(path);
1894 ret = insert_one_name(trans, root, key->objectid, key->offset,
1895 name, name_len, &log_key);
1896 if (ret && ret != -ENOENT && ret != -EEXIST)
1900 update_size = false;
1906 * find all the names in a directory item and reconcile them into
1907 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1908 * one name in a directory item, but the same code gets used for
1909 * both directory index types
1911 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1912 struct btrfs_root *root,
1913 struct btrfs_path *path,
1914 struct extent_buffer *eb, int slot,
1915 struct btrfs_key *key)
1918 u32 item_size = btrfs_item_size_nr(eb, slot);
1919 struct btrfs_dir_item *di;
1922 unsigned long ptr_end;
1923 struct btrfs_path *fixup_path = NULL;
1925 ptr = btrfs_item_ptr_offset(eb, slot);
1926 ptr_end = ptr + item_size;
1927 while (ptr < ptr_end) {
1928 di = (struct btrfs_dir_item *)ptr;
1929 name_len = btrfs_dir_name_len(eb, di);
1930 ret = replay_one_name(trans, root, path, eb, di, key);
1933 ptr = (unsigned long)(di + 1);
1937 * If this entry refers to a non-directory (directories can not
1938 * have a link count > 1) and it was added in the transaction
1939 * that was not committed, make sure we fixup the link count of
1940 * the inode the entry points to. Otherwise something like
1941 * the following would result in a directory pointing to an
1942 * inode with a wrong link count that does not account for this dir
1950 * ln testdir/bar testdir/bar_link
1951 * ln testdir/foo testdir/foo_link
1952 * xfs_io -c "fsync" testdir/bar
1956 * mount fs, log replay happens
1958 * File foo would remain with a link count of 1 when it has two
1959 * entries pointing to it in the directory testdir. This would
1960 * make it impossible to ever delete the parent directory, as
1961 * it would result in stale dentries that can never be deleted.
1963 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
1964 struct btrfs_key di_key;
1967 fixup_path = btrfs_alloc_path();
1974 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1975 ret = link_to_fixup_dir(trans, root, fixup_path,
1982 btrfs_free_path(fixup_path);
1987 * directory replay has two parts. There are the standard directory
1988 * items in the log copied from the subvolume, and range items
1989 * created in the log while the subvolume was logged.
1991 * The range items tell us which parts of the key space the log
1992 * is authoritative for. During replay, if a key in the subvolume
1993 * directory is in a logged range item, but not actually in the log
1994 * that means it was deleted from the directory before the fsync
1995 * and should be removed.
1997 static noinline int find_dir_range(struct btrfs_root *root,
1998 struct btrfs_path *path,
1999 u64 dirid, int key_type,
2000 u64 *start_ret, u64 *end_ret)
2002 struct btrfs_key key;
2004 struct btrfs_dir_log_item *item;
2008 if (*start_ret == (u64)-1)
2011 key.objectid = dirid;
2012 key.type = key_type;
2013 key.offset = *start_ret;
2015 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2019 if (path->slots[0] == 0)
2024 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2026 if (key.type != key_type || key.objectid != dirid) {
2030 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2031 struct btrfs_dir_log_item);
2032 found_end = btrfs_dir_log_end(path->nodes[0], item);
2034 if (*start_ret >= key.offset && *start_ret <= found_end) {
2036 *start_ret = key.offset;
2037 *end_ret = found_end;
2042 /* check the next slot in the tree to see if it is a valid item */
2043 nritems = btrfs_header_nritems(path->nodes[0]);
2045 if (path->slots[0] >= nritems) {
2046 ret = btrfs_next_leaf(root, path);
2051 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
2053 if (key.type != key_type || key.objectid != dirid) {
2057 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2058 struct btrfs_dir_log_item);
2059 found_end = btrfs_dir_log_end(path->nodes[0], item);
2060 *start_ret = key.offset;
2061 *end_ret = found_end;
2064 btrfs_release_path(path);
2069 * this looks for a given directory item in the log. If the directory
2070 * item is not in the log, the item is removed and the inode it points to is unlinked.
2073 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
2074 struct btrfs_root *root,
2075 struct btrfs_root *log,
2076 struct btrfs_path *path,
2077 struct btrfs_path *log_path,
2079 struct btrfs_key *dir_key)
2082 struct extent_buffer *eb;
2085 struct btrfs_dir_item *di;
2086 struct btrfs_dir_item *log_di;
2089 unsigned long ptr_end;
2091 struct inode *inode;
2092 struct btrfs_key location;
2095 eb = path->nodes[0];
2096 slot = path->slots[0];
2097 item_size = btrfs_item_size_nr(eb, slot);
2098 ptr = btrfs_item_ptr_offset(eb, slot);
2099 ptr_end = ptr + item_size;
2100 while (ptr < ptr_end) {
2101 di = (struct btrfs_dir_item *)ptr;
2102 name_len = btrfs_dir_name_len(eb, di);
2103 name = kmalloc(name_len, GFP_NOFS);
2108 read_extent_buffer(eb, name, (unsigned long)(di + 1),
2111 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
2112 log_di = btrfs_lookup_dir_item(trans, log, log_path,
2115 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
2116 log_di = btrfs_lookup_dir_index_item(trans, log,
2122 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
2123 btrfs_dir_item_key_to_cpu(eb, di, &location);
2124 btrfs_release_path(path);
2125 btrfs_release_path(log_path);
2126 inode = read_one_inode(root, location.objectid);
2132 ret = link_to_fixup_dir(trans, root,
2133 path, location.objectid);
2141 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
2142 BTRFS_I(inode), name, name_len);
2144 ret = btrfs_run_delayed_items(trans);
2150 /* there might still be more names under this key
2151 * check and repeat if required
2153 ret = btrfs_search_slot(NULL, root, dir_key, path,
2159 } else if (IS_ERR(log_di)) {
2161 return PTR_ERR(log_di);
2163 btrfs_release_path(log_path);
2166 ptr = (unsigned long)(di + 1);
2171 btrfs_release_path(path);
2172 btrfs_release_path(log_path);
2176 static int replay_xattr_deletes(struct btrfs_trans_handle *trans,
2177 struct btrfs_root *root,
2178 struct btrfs_root *log,
2179 struct btrfs_path *path,
2182 struct btrfs_key search_key;
2183 struct btrfs_path *log_path;
2188 log_path = btrfs_alloc_path();
2192 search_key.objectid = ino;
2193 search_key.type = BTRFS_XATTR_ITEM_KEY;
2194 search_key.offset = 0;
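/*
* Walk every xattr of this inode in the subvolume tree and delete the
* names that are not present in the log, since the log holds the
* authoritative set of xattrs at fsync time.
*/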
2196 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
2200 nritems = btrfs_header_nritems(path->nodes[0]);
2201 for (i = path->slots[0]; i < nritems; i++) {
2202 struct btrfs_key key;
2203 struct btrfs_dir_item *di;
2204 struct btrfs_dir_item *log_di;
2208 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2209 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2214 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2215 total_size = btrfs_item_size_nr(path->nodes[0], i);
2217 while (cur < total_size) {
2218 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2219 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2220 u32 this_len = sizeof(*di) + name_len + data_len;
2223 name = kmalloc(name_len, GFP_NOFS);
2228 read_extent_buffer(path->nodes[0], name,
2229 (unsigned long)(di + 1), name_len);
2231 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2233 btrfs_release_path(log_path);
2235 /* Doesn't exist in log tree, so delete it. */
2236 btrfs_release_path(path);
2237 di = btrfs_lookup_xattr(trans, root, path, ino,
2238 name, name_len, -1);
2245 ret = btrfs_delete_one_dir_name(trans, root,
2249 btrfs_release_path(path);
2254 if (IS_ERR(log_di)) {
2255 ret = PTR_ERR(log_di);
2259 di = (struct btrfs_dir_item *)((char *)di + this_len);
2262 ret = btrfs_next_leaf(root, path);
2268 btrfs_free_path(log_path);
2269 btrfs_release_path(path);
2275 * deletion replay happens before we copy any new directory items
2276 * out of the log or out of backreferences from inodes. It
2277 * scans the log to find ranges of keys that the log is authoritative for,
2278 * and then scans the directory to find items in those ranges that are
2279 * not present in the log.
2281 * Anything we don't find in the log is unlinked and removed from the directory.
2284 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
2285 struct btrfs_root *root,
2286 struct btrfs_root *log,
2287 struct btrfs_path *path,
2288 u64 dirid, int del_all)
2292 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
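/*
* Two passes: first the DIR_ITEM key space using DIR_LOG_ITEM range
* items, then the DIR_INDEX key space using DIR_LOG_INDEX range items.
*/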
2294 struct btrfs_key dir_key;
2295 struct btrfs_key found_key;
2296 struct btrfs_path *log_path;
2299 dir_key.objectid = dirid;
2300 dir_key.type = BTRFS_DIR_ITEM_KEY;
2301 log_path = btrfs_alloc_path();
2305 dir = read_one_inode(root, dirid);
2306 /* it isn't an error if the inode isn't there, that can happen
2307 * because we replay the deletes before we copy in the inode item from the log
2311 btrfs_free_path(log_path);
2319 range_end = (u64)-1;
2321 ret = find_dir_range(log, path, dirid, key_type,
2322 &range_start, &range_end);
2327 dir_key.offset = range_start;
2330 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2335 nritems = btrfs_header_nritems(path->nodes[0]);
2336 if (path->slots[0] >= nritems) {
2337 ret = btrfs_next_leaf(root, path);
2343 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2345 if (found_key.objectid != dirid ||
2346 found_key.type != dir_key.type)
2349 if (found_key.offset > range_end)
2352 ret = check_item_in_log(trans, root, log, path,
2357 if (found_key.offset == (u64)-1)
2359 dir_key.offset = found_key.offset + 1;
2361 btrfs_release_path(path);
2362 if (range_end == (u64)-1)
2364 range_start = range_end + 1;
2369 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2370 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2371 dir_key.type = BTRFS_DIR_INDEX_KEY;
2372 btrfs_release_path(path);
2376 btrfs_release_path(path);
2377 btrfs_free_path(log_path);
2383 * the process_func used to replay items from the log tree. This
2384 * gets called in two different stages. The first stage just looks
2385 * for inodes and makes sure they are all copied into the subvolume.
2387 * The second stage copies all the other item types from the log into
2388 * the subvolume. The two stage approach is slower, but gets rid of
2389 * lots of complexity around inodes referencing other inodes that exist
2390 * only in the log (references come from either directory items or inode back refs).
2393 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2394 struct walk_control *wc, u64 gen, int level)
2397 struct btrfs_path *path;
2398 struct btrfs_root *root = wc->replay_dest;
2399 struct btrfs_key key;
2403 ret = btrfs_read_buffer(eb, gen, level, NULL);
2407 level = btrfs_header_level(eb);
2412 path = btrfs_alloc_path();
2416 nritems = btrfs_header_nritems(eb);
2417 for (i = 0; i < nritems; i++) {
2418 btrfs_item_key_to_cpu(eb, &key, i);
2420 /* inode keys are done during the first stage */
2421 if (key.type == BTRFS_INODE_ITEM_KEY &&
2422 wc->stage == LOG_WALK_REPLAY_INODES) {
2423 struct btrfs_inode_item *inode_item;
2426 inode_item = btrfs_item_ptr(eb, i,
2427 struct btrfs_inode_item);
2428 ret = replay_xattr_deletes(wc->trans, root, log,
2429 path, key.objectid);
2432 mode = btrfs_inode_mode(eb, inode_item);
2433 if (S_ISDIR(mode)) {
2434 ret = replay_dir_deletes(wc->trans,
2435 root, log, path, key.objectid, 0);
2439 ret = overwrite_item(wc->trans, root, path,
2445 * Before replaying extents, truncate the inode to its
2446 * size. We need to do it now and not after log replay
2447 * because before an fsync we can have prealloc extents
2448 * added beyond the inode's i_size. If we did it after,
2449 * through orphan cleanup for example, we would drop
2450 * those prealloc extents just after replaying them.
2452 if (S_ISREG(mode)) {
2453 struct inode *inode;
2456 inode = read_one_inode(root, key.objectid);
2461 from = ALIGN(i_size_read(inode),
2462 root->fs_info->sectorsize);
2463 ret = btrfs_drop_extents(wc->trans, root, inode,
2466 * If the nlink count is zero here, the iput
2467 * will free the inode. We bump it to make
2468 * sure it doesn't get freed until the link
2469 * count fixup is done.
2472 if (inode->i_nlink == 0)
2474 /* Update link count and nbytes. */
2475 ret = btrfs_update_inode(wc->trans,
2483 ret = link_to_fixup_dir(wc->trans, root,
2484 path, key.objectid);
2489 if (key.type == BTRFS_DIR_INDEX_KEY &&
2490 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2491 ret = replay_one_dir_item(wc->trans, root, path,
2497 if (wc->stage < LOG_WALK_REPLAY_ALL)
2500 /* these keys are simply copied */
2501 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2502 ret = overwrite_item(wc->trans, root, path,
2506 } else if (key.type == BTRFS_INODE_REF_KEY ||
2507 key.type == BTRFS_INODE_EXTREF_KEY) {
2508 ret = add_inode_ref(wc->trans, root, log, path,
2510 if (ret && ret != -ENOENT)
2513 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2514 ret = replay_one_extent(wc->trans, root, path,
2518 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2519 ret = replay_one_dir_item(wc->trans, root, path,
2525 btrfs_free_path(path);
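/*
* walk down the log tree to the leftmost unvisited leaf, calling
* wc->process_func on each leaf found. When wc->free is set the processed
* leaves are also cleaned and their reserved extents freed or pinned.
*/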
2529 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2530 struct btrfs_root *root,
2531 struct btrfs_path *path, int *level,
2532 struct walk_control *wc)
2534 struct btrfs_fs_info *fs_info = root->fs_info;
2538 struct extent_buffer *next;
2539 struct extent_buffer *cur;
2540 struct extent_buffer *parent;
2544 WARN_ON(*level < 0);
2545 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2547 while (*level > 0) {
2548 struct btrfs_key first_key;
2550 WARN_ON(*level < 0);
2551 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2552 cur = path->nodes[*level];
2554 WARN_ON(btrfs_header_level(cur) != *level);
2556 if (path->slots[*level] >=
2557 btrfs_header_nritems(cur))
2560 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2561 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2562 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]);
2563 blocksize = fs_info->nodesize;
2565 parent = path->nodes[*level];
2566 root_owner = btrfs_header_owner(parent);
2568 next = btrfs_find_create_tree_block(fs_info, bytenr);
2570 return PTR_ERR(next);
2573 ret = wc->process_func(root, next, wc, ptr_gen,
2576 free_extent_buffer(next);
2580 path->slots[*level]++;
2582 ret = btrfs_read_buffer(next, ptr_gen,
2583 *level - 1, &first_key);
2585 free_extent_buffer(next);
2590 btrfs_tree_lock(next);
2591 btrfs_set_lock_blocking(next);
2592 clean_tree_block(fs_info, next);
2593 btrfs_wait_tree_block_writeback(next);
2594 btrfs_tree_unlock(next);
2596 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2597 clear_extent_buffer_dirty(next);
2600 WARN_ON(root_owner !=
2601 BTRFS_TREE_LOG_OBJECTID);
2602 ret = btrfs_free_and_pin_reserved_extent(
2606 free_extent_buffer(next);
2610 free_extent_buffer(next);
2613 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key);
2615 free_extent_buffer(next);
2619 WARN_ON(*level <= 0);
2620 if (path->nodes[*level-1])
2621 free_extent_buffer(path->nodes[*level-1]);
2622 path->nodes[*level-1] = next;
2623 *level = btrfs_header_level(next);
2624 path->slots[*level] = 0;
2627 WARN_ON(*level < 0);
2628 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2630 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
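/*
 * Walk back up the log tree once walk_down_log_tree has exhausted a
 * subtree. Every fully visited node is passed to wc->process_func (and
 * cleaned up when the log tree is being freed); the walk stops at the
 * first level that still has unvisited slots.
 */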
2636 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2637 struct btrfs_root *root,
2638 struct btrfs_path *path, int *level,
2639 struct walk_control *wc)
2641 struct btrfs_fs_info *fs_info = root->fs_info;
2647 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2648 slot = path->slots[i];
2649 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2652 WARN_ON(*level == 0);
2655 struct extent_buffer *parent;
2656 if (path->nodes[*level] == root->node)
2657 parent = path->nodes[*level];
2659 parent = path->nodes[*level + 1];
2661 root_owner = btrfs_header_owner(parent);
2662 ret = wc->process_func(root, path->nodes[*level], wc,
2663 btrfs_header_generation(path->nodes[*level]),
2669 struct extent_buffer *next;
2671 next = path->nodes[*level];
2674 btrfs_tree_lock(next);
2675 btrfs_set_lock_blocking(next);
2676 clean_tree_block(fs_info, next);
2677 btrfs_wait_tree_block_writeback(next);
2678 btrfs_tree_unlock(next);
2680 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2681 clear_extent_buffer_dirty(next);
2684 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2685 ret = btrfs_free_and_pin_reserved_extent(
2687 path->nodes[*level]->start,
2688 path->nodes[*level]->len);
2692 free_extent_buffer(path->nodes[*level]);
2693 path->nodes[*level] = NULL;
2701 * drop the reference count on the tree rooted at 'snap'. This traverses
2702 * the tree freeing any blocks that have a ref count of zero after being decremented.
2705 static int walk_log_tree(struct btrfs_trans_handle *trans,
2706 struct btrfs_root *log, struct walk_control *wc)
2708 struct btrfs_fs_info *fs_info = log->fs_info;
2712 struct btrfs_path *path;
2715 path = btrfs_alloc_path();
2719 level = btrfs_header_level(log->node);
2721 path->nodes[level] = log->node;
2722 extent_buffer_get(log->node);
2723 path->slots[level] = 0;
2726 wret = walk_down_log_tree(trans, log, path, &level, wc);
2734 wret = walk_up_log_tree(trans, log, path, &level, wc);
2743 /* was the root node processed? if not, catch it here */
2744 if (path->nodes[orig_level]) {
2745 ret = wc->process_func(log, path->nodes[orig_level], wc,
2746 btrfs_header_generation(path->nodes[orig_level]),
2751 struct extent_buffer *next;
2753 next = path->nodes[orig_level];
2756 btrfs_tree_lock(next);
2757 btrfs_set_lock_blocking(next);
2758 clean_tree_block(fs_info, next);
2759 btrfs_wait_tree_block_writeback(next);
2760 btrfs_tree_unlock(next);
2762 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2763 clear_extent_buffer_dirty(next);
2766 WARN_ON(log->root_key.objectid !=
2767 BTRFS_TREE_LOG_OBJECTID);
2768 ret = btrfs_free_and_pin_reserved_extent(fs_info,
2769 next->start, next->len);
2776 btrfs_free_path(path);
2781 * helper function to update the item for a given subvolume's log root
2782 * in the tree of log roots
2784 static int update_log_root(struct btrfs_trans_handle *trans,
2785 struct btrfs_root *log)
2787 struct btrfs_fs_info *fs_info = log->fs_info;
2790 if (log->log_transid == 1) {
2791 /* insert root item on the first sync */
2792 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2793 &log->root_key, &log->root_item);
2795 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2796 &log->root_key, &log->root_item);
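/*
 * Wait for the log commit of the given transid, driven by another task,
 * to finish. Called with root->log_mutex held; the mutex is dropped
 * while sleeping and re-acquired before returning.
 */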
2801 static void wait_log_commit(struct btrfs_root *root, int transid)
2804 int index = transid % 2;
2807 * we only allow two pending log transactions at a time,
2808 * so we know that if ours is more than 2 older than the
2809 * current transaction, we're done
2812 prepare_to_wait(&root->log_commit_wait[index],
2813 &wait, TASK_UNINTERRUPTIBLE);
2815 if (!(root->log_transid_committed < transid &&
2816 atomic_read(&root->log_commit[index])))
2819 mutex_unlock(&root->log_mutex);
2821 mutex_lock(&root->log_mutex);
2823 finish_wait(&root->log_commit_wait[index], &wait);
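/*
 * Wait until no tasks are writing to this log tree anymore (log_writers
 * drops to zero), releasing root->log_mutex while sleeping.
 */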
2826 static void wait_for_writer(struct btrfs_root *root)
2831 prepare_to_wait(&root->log_writer_wait, &wait,
2832 TASK_UNINTERRUPTIBLE);
2833 if (!atomic_read(&root->log_writers))
2836 mutex_unlock(&root->log_mutex);
2838 mutex_lock(&root->log_mutex);
2840 finish_wait(&root->log_writer_wait, &wait);
2843 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2844 struct btrfs_log_ctx *ctx)
2849 mutex_lock(&root->log_mutex);
2850 list_del_init(&ctx->list);
2851 mutex_unlock(&root->log_mutex);
2855 * Invoked in log mutex context, or the caller must make sure no other
2856 * task can access the list.
2858 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2859 int index, int error)
2861 struct btrfs_log_ctx *ctx;
2862 struct btrfs_log_ctx *safe;
2864 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
2865 list_del_init(&ctx->list);
2866 ctx->log_ret = error;
2869 INIT_LIST_HEAD(&root->log_ctxs[index]);
2873 * btrfs_sync_log sends a given tree log down to the disk and
2874 * updates the super blocks to record it. When this call is done,
2875 * you know that any inodes previously logged are safely on disk only if it returns 0.
2878 * Any other return value means you need to call btrfs_commit_transaction.
2879 * Some of the edge cases for fsyncing directories that have had unlinks
2880 * or renames done in the past mean that sometimes the only safe
2881 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2882 * that has happened.
2884 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2885 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2891 struct btrfs_fs_info *fs_info = root->fs_info;
2892 struct btrfs_root *log = root->log_root;
2893 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
2894 int log_transid = 0;
2895 struct btrfs_log_ctx root_log_ctx;
2896 struct blk_plug plug;
2898 mutex_lock(&root->log_mutex);
2899 log_transid = ctx->log_transid;
2900 if (root->log_transid_committed >= log_transid) {
2901 mutex_unlock(&root->log_mutex);
2902 return ctx->log_ret;
2905 index1 = log_transid % 2;
2906 if (atomic_read(&root->log_commit[index1])) {
2907 wait_log_commit(root, log_transid);
2908 mutex_unlock(&root->log_mutex);
2909 return ctx->log_ret;
2911 ASSERT(log_transid == root->log_transid);
2912 atomic_set(&root->log_commit[index1], 1);
2914 /* wait for previous tree log sync to complete */
2915 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2916 wait_log_commit(root, log_transid - 1);
2919 int batch = atomic_read(&root->log_batch);
2920 /* when we're on an ssd, just kick the log commit out */
2921 if (!btrfs_test_opt(fs_info, SSD) &&
2922 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2923 mutex_unlock(&root->log_mutex);
2924 schedule_timeout_uninterruptible(1);
2925 mutex_lock(&root->log_mutex);
2927 wait_for_writer(root);
2928 if (batch == atomic_read(&root->log_batch))
2932 /* bail out if we need to do a full commit */
2933 if (btrfs_need_log_full_commit(fs_info, trans)) {
2935 mutex_unlock(&root->log_mutex);
2939 if (log_transid % 2 == 0)
2940 mark = EXTENT_DIRTY;
2944 /* we start IO on all the marked extents here, but we don't actually
2945 * wait for them until later.
2947 blk_start_plug(&plug);
2948 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
2950 blk_finish_plug(&plug);
2951 btrfs_abort_transaction(trans, ret);
2952 btrfs_set_log_full_commit(fs_info, trans);
2953 mutex_unlock(&root->log_mutex);
2957 btrfs_set_root_node(&log->root_item, log->node);
2959 root->log_transid++;
2960 log->log_transid = root->log_transid;
2961 root->log_start_pid = 0;
2963 * IO has been started, blocks of the log tree have WRITTEN flag set
2964 * in their headers. New modifications of the log will be written to
2965 * new positions, so it's safe to allow log writers to go in.
2967 mutex_unlock(&root->log_mutex);
2969 btrfs_init_log_ctx(&root_log_ctx, NULL);
2971 mutex_lock(&log_root_tree->log_mutex);
2972 atomic_inc(&log_root_tree->log_batch);
2973 atomic_inc(&log_root_tree->log_writers);
2975 index2 = log_root_tree->log_transid % 2;
2976 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2977 root_log_ctx.log_transid = log_root_tree->log_transid;
2979 mutex_unlock(&log_root_tree->log_mutex);
2981 ret = update_log_root(trans, log);
2983 mutex_lock(&log_root_tree->log_mutex);
2984 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2985 /* atomic_dec_and_test implies a barrier */
2986 cond_wake_up_nomb(&log_root_tree->log_writer_wait);
2990 if (!list_empty(&root_log_ctx.list))
2991 list_del_init(&root_log_ctx.list);
2993 blk_finish_plug(&plug);
2994 btrfs_set_log_full_commit(fs_info, trans);
2996 if (ret != -ENOSPC) {
2997 btrfs_abort_transaction(trans, ret);
2998 mutex_unlock(&log_root_tree->log_mutex);
3001 btrfs_wait_tree_log_extents(log, mark);
3002 mutex_unlock(&log_root_tree->log_mutex);
3007 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
3008 blk_finish_plug(&plug);
3009 list_del_init(&root_log_ctx.list);
3010 mutex_unlock(&log_root_tree->log_mutex);
3011 ret = root_log_ctx.log_ret;
3015 index2 = root_log_ctx.log_transid % 2;
3016 if (atomic_read(&log_root_tree->log_commit[index2])) {
3017 blk_finish_plug(&plug);
3018 ret = btrfs_wait_tree_log_extents(log, mark);
3019 wait_log_commit(log_root_tree,
3020 root_log_ctx.log_transid);
3021 mutex_unlock(&log_root_tree->log_mutex);
3023 ret = root_log_ctx.log_ret;
3026 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
3027 atomic_set(&log_root_tree->log_commit[index2], 1);
3029 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
3030 wait_log_commit(log_root_tree,
3031 root_log_ctx.log_transid - 1);
3034 wait_for_writer(log_root_tree);
3037 * now that we've moved on to the tree of log tree roots,
3038 * check the full commit flag again
3040 if (btrfs_need_log_full_commit(fs_info, trans)) {
3041 blk_finish_plug(&plug);
3042 btrfs_wait_tree_log_extents(log, mark);
3043 mutex_unlock(&log_root_tree->log_mutex);
3045 goto out_wake_log_root;
3048 ret = btrfs_write_marked_extents(fs_info,
3049 &log_root_tree->dirty_log_pages,
3050 EXTENT_DIRTY | EXTENT_NEW);
3051 blk_finish_plug(&plug);
3053 btrfs_set_log_full_commit(fs_info, trans);
3054 btrfs_abort_transaction(trans, ret);
3055 mutex_unlock(&log_root_tree->log_mutex);
3056 goto out_wake_log_root;
3058 ret = btrfs_wait_tree_log_extents(log, mark);
3060 ret = btrfs_wait_tree_log_extents(log_root_tree,
3061 EXTENT_NEW | EXTENT_DIRTY);
3063 btrfs_set_log_full_commit(fs_info, trans);
3064 mutex_unlock(&log_root_tree->log_mutex);
3065 goto out_wake_log_root;
3068 btrfs_set_super_log_root(fs_info->super_for_commit,
3069 log_root_tree->node->start);
3070 btrfs_set_super_log_root_level(fs_info->super_for_commit,
3071 btrfs_header_level(log_root_tree->node));
3073 log_root_tree->log_transid++;
3074 mutex_unlock(&log_root_tree->log_mutex);
3077 * nobody else is going to jump in and write the ctree
3078 * super here because the log_commit atomic below is protecting
3079 * us. We must be called with a transaction handle pinning
3080 * the running transaction open, so a full commit can't hop
3081 * in and cause problems either.
3083 ret = write_all_supers(fs_info, 1);
3085 btrfs_set_log_full_commit(fs_info, trans);
3086 btrfs_abort_transaction(trans, ret);
3087 goto out_wake_log_root;
3090 mutex_lock(&root->log_mutex);
3091 if (root->last_log_commit < log_transid)
3092 root->last_log_commit = log_transid;
3093 mutex_unlock(&root->log_mutex);
3096 mutex_lock(&log_root_tree->log_mutex);
3097 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
3099 log_root_tree->log_transid_committed++;
3100 atomic_set(&log_root_tree->log_commit[index2], 0);
3101 mutex_unlock(&log_root_tree->log_mutex);
3104 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3105 * all the updates above are seen by the woken threads. It might not be
3106 * necessary, but proving that seems to be hard.
3108 cond_wake_up(&log_root_tree->log_commit_wait[index2]);
3110 mutex_lock(&root->log_mutex);
3111 btrfs_remove_all_log_ctxs(root, index1, ret);
3112 root->log_transid_committed++;
3113 atomic_set(&root->log_commit[index1], 0);
3114 mutex_unlock(&root->log_mutex);
3117 * The barrier before waitqueue_active (in cond_wake_up) is needed so
3118 * all the updates above are seen by the woken threads. It might not be
3119 * necessary, but proving that seems to be hard.
3121 cond_wake_up(&root->log_commit_wait[index1]);
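/*
 * Tear down a log tree: walk it so each of its blocks is cleaned and its
 * reserved extent freed/pinned, clear any leftover dirty ranges and drop
 * the reference on the tree's root node.
 */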
3125 static void free_log_tree(struct btrfs_trans_handle *trans,
3126 struct btrfs_root *log)
3131 struct walk_control wc = {
3133 .process_func = process_one_buffer
3136 ret = walk_log_tree(trans, log, &wc);
3137 /* I don't think this can happen but just in case */
3139 btrfs_abort_transaction(trans, ret);
3142 ret = find_first_extent_bit(&log->dirty_log_pages,
3144 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT,
3149 clear_extent_bits(&log->dirty_log_pages, start, end,
3150 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT);
3153 free_extent_buffer(log->node);
3158 * free all the extents used by the tree log. This should be called
3159 * at commit time of the full transaction
3161 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
3163 if (root->log_root) {
3164 free_log_tree(trans, root->log_root);
3165 root->log_root = NULL;
3170 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
3171 struct btrfs_fs_info *fs_info)
3173 if (fs_info->log_root_tree) {
3174 free_log_tree(trans, fs_info->log_root_tree);
3175 fs_info->log_root_tree = NULL;
3181 * If both a file and directory are logged, and unlinks or renames are
3182 * mixed in, we have a few interesting corners:
3184 * create file X in dir Y
3185 * link file X to X.link in dir Y
3187 * unlink file X but leave X.link
3190 * After a crash we would expect only X.link to exist. But file X
3191 * didn't get fsync'd again so the log has back refs for X and X.link.
3193 * We solve this by removing directory entries and inode backrefs from the
3194 * log when a file that was logged in the current transaction is
3195 * unlinked. Any later fsync will include the updated log entries, and
3196 * we'll be able to reconstruct the proper directory items from backrefs.
3198 * This optimization allows us to avoid relogging the entire inode
3199 * or the entire directory.
3201 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3202 struct btrfs_root *root,
3203 const char *name, int name_len,
3204 struct btrfs_inode *dir, u64 index)
3206 struct btrfs_root *log;
3207 struct btrfs_dir_item *di;
3208 struct btrfs_path *path;
3212 u64 dir_ino = btrfs_ino(dir);
3214 if (dir->logged_trans < trans->transid)
3217 ret = join_running_log_trans(root);
3221 mutex_lock(&dir->log_mutex);
3223 log = root->log_root;
3224 path = btrfs_alloc_path();
3230 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3231 name, name_len, -1);
3237 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3238 bytes_del += name_len;
3244 btrfs_release_path(path);
3245 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3246 index, name, name_len, -1);
3252 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3253 bytes_del += name_len;
3260 /* update the directory size in the log to reflect the names we have removed
3264 struct btrfs_key key;
3266 key.objectid = dir_ino;
3268 key.type = BTRFS_INODE_ITEM_KEY;
3269 btrfs_release_path(path);
3271 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
3277 struct btrfs_inode_item *item;
3280 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3281 struct btrfs_inode_item);
3282 i_size = btrfs_inode_size(path->nodes[0], item);
3283 if (i_size > bytes_del)
3284 i_size -= bytes_del;
3287 btrfs_set_inode_size(path->nodes[0], item, i_size);
3288 btrfs_mark_buffer_dirty(path->nodes[0]);
3291 btrfs_release_path(path);
3294 btrfs_free_path(path);
3296 mutex_unlock(&dir->log_mutex);
3297 if (ret == -ENOSPC) {
3298 btrfs_set_log_full_commit(root->fs_info, trans);
3301 btrfs_abort_transaction(trans, ret);
3303 btrfs_end_log_trans(root);
3308 /* see comments for btrfs_del_dir_entries_in_log */
3309 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3310 struct btrfs_root *root,
3311 const char *name, int name_len,
3312 struct btrfs_inode *inode, u64 dirid)
3314 struct btrfs_fs_info *fs_info = root->fs_info;
3315 struct btrfs_root *log;
3319 if (inode->logged_trans < trans->transid)
3322 ret = join_running_log_trans(root);
3325 log = root->log_root;
3326 mutex_lock(&inode->log_mutex);
3328 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3330 mutex_unlock(&inode->log_mutex);
3331 if (ret == -ENOSPC) {
3332 btrfs_set_log_full_commit(fs_info, trans);
3334 } else if (ret < 0 && ret != -ENOENT)
3335 btrfs_abort_transaction(trans, ret);
3336 btrfs_end_log_trans(root);
3342 * creates a range item in the log for 'dirid'. first_offset and
3343 * last_offset tell us which parts of the key space the log should
3344 * be considered authoritative for.
3346 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
3347 struct btrfs_root *log,
3348 struct btrfs_path *path,
3349 int key_type, u64 dirid,
3350 u64 first_offset, u64 last_offset)
3353 struct btrfs_key key;
3354 struct btrfs_dir_log_item *item;
3356 key.objectid = dirid;
3357 key.offset = first_offset;
3358 if (key_type == BTRFS_DIR_ITEM_KEY)
3359 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3361 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3362 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3366 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3367 struct btrfs_dir_log_item);
3368 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3369 btrfs_mark_buffer_dirty(path->nodes[0]);
3370 btrfs_release_path(path);
3375 * log all the items included in the current transaction for a given
3376 * directory. This also creates the range items in the log tree required
3377 * to replay anything deleted before the fsync
3379 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3380 struct btrfs_root *root, struct btrfs_inode *inode,
3381 struct btrfs_path *path,
3382 struct btrfs_path *dst_path, int key_type,
3383 struct btrfs_log_ctx *ctx,
3384 u64 min_offset, u64 *last_offset_ret)
3386 struct btrfs_key min_key;
3387 struct btrfs_root *log = root->log_root;
3388 struct extent_buffer *src;
3393 u64 first_offset = min_offset;
3394 u64 last_offset = (u64)-1;
3395 u64 ino = btrfs_ino(inode);
3397 log = root->log_root;
3399 min_key.objectid = ino;
3400 min_key.type = key_type;
3401 min_key.offset = min_offset;
3403 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3406 * we didn't find anything from this transaction, see if there
3407 * is anything at all
3409 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3410 min_key.objectid = ino;
3411 min_key.type = key_type;
3412 min_key.offset = (u64)-1;
3413 btrfs_release_path(path);
3414 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3416 btrfs_release_path(path);
3419 ret = btrfs_previous_item(root, path, ino, key_type);
3421 /* if ret == 0 there are items for this type,
3422 * create a range to tell us the last key of this type.
3423 * otherwise, there are no items in this directory after
3424 * *min_offset, and we create a range to indicate that.
3427 struct btrfs_key tmp;
3428 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3430 if (key_type == tmp.type)
3431 first_offset = max(min_offset, tmp.offset) + 1;
3436 /* go backward to find any previous key */
3437 ret = btrfs_previous_item(root, path, ino, key_type);
3439 struct btrfs_key tmp;
3440 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3441 if (key_type == tmp.type) {
3442 first_offset = tmp.offset;
3443 ret = overwrite_item(trans, log, dst_path,
3444 path->nodes[0], path->slots[0],
3452 btrfs_release_path(path);
3454 /* find the first key from this transaction again */
3455 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3456 if (WARN_ON(ret != 0))
3460 * we have a block from this transaction, log every item in it
3461 * from our directory
3464 struct btrfs_key tmp;
3465 src = path->nodes[0];
3466 nritems = btrfs_header_nritems(src);
3467 for (i = path->slots[0]; i < nritems; i++) {
3468 struct btrfs_dir_item *di;
3470 btrfs_item_key_to_cpu(src, &min_key, i);
3472 if (min_key.objectid != ino || min_key.type != key_type)
3474 ret = overwrite_item(trans, log, dst_path, src, i,
3482 * We must make sure that when we log a directory entry,
3483 * the corresponding inode, after log replay, has a
3484 * matching link count. For example:
3490 * xfs_io -c "fsync" mydir
3492 * <mount fs and log replay>
3494 * Would result in a fsync log that when replayed, our
3495 * file inode would have a link count of 1, but we get
3496 * two directory entries pointing to the same inode.
3497 * After removing one of the names, it would not be
3498 * possible to remove the other name, which resulted
3499 * always in stale file handle errors, and would not
3500 * be possible to rmdir the parent directory, since
3501 * its i_size could never decrement to the value
3502 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors.
3504 di = btrfs_item_ptr(src, i, struct btrfs_dir_item);
3505 btrfs_dir_item_key_to_cpu(src, di, &tmp);
3507 (btrfs_dir_transid(src, di) == trans->transid ||
3508 btrfs_dir_type(src, di) == BTRFS_FT_DIR) &&
3509 tmp.type != BTRFS_ROOT_ITEM_KEY)
3510 ctx->log_new_dentries = true;
3512 path->slots[0] = nritems;
3515 * look ahead to the next item and see if it is also
3516 * from this directory and from this transaction
3518 ret = btrfs_next_leaf(root, path);
3521 last_offset = (u64)-1;
3526 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3527 if (tmp.objectid != ino || tmp.type != key_type) {
3528 last_offset = (u64)-1;
3531 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3532 ret = overwrite_item(trans, log, dst_path,
3533 path->nodes[0], path->slots[0],
3538 last_offset = tmp.offset;
3543 btrfs_release_path(path);
3544 btrfs_release_path(dst_path);
3547 *last_offset_ret = last_offset;
3549 * insert the log range keys to indicate where the log is valid
3552 ret = insert_dir_log_key(trans, log, path, key_type,
3553 ino, first_offset, last_offset);
3561 * logging directories is very similar to logging inodes; we find all the
3562 * items from the current transaction and write them to the log.
3564 * The recovery code scans the directory in the subvolume, and if it finds a
3565 * key in the range logged that is not present in the log tree, then it means
3566 * that dir entry was unlinked during the transaction.
3568 * In order for that scan to work, we must include one key smaller than
3569 * the smallest logged by this transaction and one key larger than the largest
3570 * key logged by this transaction.
3572 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3573 struct btrfs_root *root, struct btrfs_inode *inode,
3574 struct btrfs_path *path,
3575 struct btrfs_path *dst_path,
3576 struct btrfs_log_ctx *ctx)
3581 int key_type = BTRFS_DIR_ITEM_KEY;
3587 ret = log_dir_items(trans, root, inode, path, dst_path, key_type,
3588 ctx, min_key, &max_key);
3591 if (max_key == (u64)-1)
3593 min_key = max_key + 1;
3596 if (key_type == BTRFS_DIR_ITEM_KEY) {
3597 key_type = BTRFS_DIR_INDEX_KEY;
3604 * a helper function to drop items from the log before we relog an
3605 * inode. max_key_type indicates the highest item type to remove.
3606 * This cannot be run for file data extents because it does not
3607 * free the extents they point to.
3609 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3610 struct btrfs_root *log,
3611 struct btrfs_path *path,
3612 u64 objectid, int max_key_type)
3615 struct btrfs_key key;
3616 struct btrfs_key found_key;
3619 key.objectid = objectid;
3620 key.type = max_key_type;
3621 key.offset = (u64)-1;
3624 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3625 BUG_ON(ret == 0); /* Logic error */
3629 if (path->slots[0] == 0)
3633 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3636 if (found_key.objectid != objectid)
3639 found_key.offset = 0;
3641 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3644 ret = btrfs_del_items(trans, log, path, start_slot,
3645 path->slots[0] - start_slot + 1);
3647 * If start slot isn't 0 then we don't need to re-search, we've
3648 * found the last guy with the objectid in this tree.
3650 if (ret || start_slot != 0)
3652 btrfs_release_path(path);
3654 btrfs_release_path(path);
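/*
 * Fill a btrfs_inode_item in the log from the in-memory VFS inode. When
 * log_inode_only is set, the generation is zeroed and the size forced to
 * logged_isize so that replay only recreates the inode instead of
 * updating its contents.
 */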
3660 static void fill_inode_item(struct btrfs_trans_handle *trans,
3661 struct extent_buffer *leaf,
3662 struct btrfs_inode_item *item,
3663 struct inode *inode, int log_inode_only,
3666 struct btrfs_map_token token;
3668 btrfs_init_map_token(&token);
3670 if (log_inode_only) {
3671 /* set the generation to zero so the recovery code
3672 * can tell the difference between a logging
3673 * just to say 'this inode exists' and a logging
3674 * to say 'update this inode with these values'
3676 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3677 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3679 btrfs_set_token_inode_generation(leaf, item,
3680 BTRFS_I(inode)->generation,
3682 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3685 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3686 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3687 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3688 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3690 btrfs_set_token_timespec_sec(leaf, &item->atime,
3691 inode->i_atime.tv_sec, &token);
3692 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3693 inode->i_atime.tv_nsec, &token);
3695 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3696 inode->i_mtime.tv_sec, &token);
3697 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3698 inode->i_mtime.tv_nsec, &token);
3700 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3701 inode->i_ctime.tv_sec, &token);
3702 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3703 inode->i_ctime.tv_nsec, &token);
3705 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3708 btrfs_set_token_inode_sequence(leaf, item,
3709 inode_peek_iversion(inode), &token);
3710 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3711 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3712 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3713 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
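/*
 * Insert (or overwrite) the inode item for 'inode' in the log tree and
 * fill it from the in-memory inode.
 */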
3716 static int log_inode_item(struct btrfs_trans_handle *trans,
3717 struct btrfs_root *log, struct btrfs_path *path,
3718 struct btrfs_inode *inode)
3720 struct btrfs_inode_item *inode_item;
3723 ret = btrfs_insert_empty_item(trans, log, path,
3724 &inode->location, sizeof(*inode_item));
3725 if (ret && ret != -EEXIST)
3727 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3728 struct btrfs_inode_item);
3729 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode,
3731 btrfs_release_path(path);
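/*
 * Copy 'nr' items, starting at 'start_slot' of the leaf in src_path, from
 * the subvolume tree into the log tree. Checksums for any file extents
 * copied are added to the log as well, and holes between extents are made
 * explicit so they can be punched at replay time.
 */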
3735 static noinline int copy_items(struct btrfs_trans_handle *trans,
3736 struct btrfs_inode *inode,
3737 struct btrfs_path *dst_path,
3738 struct btrfs_path *src_path, u64 *last_extent,
3739 int start_slot, int nr, int inode_only,
3742 struct btrfs_fs_info *fs_info = trans->fs_info;
3743 unsigned long src_offset;
3744 unsigned long dst_offset;
3745 struct btrfs_root *log = inode->root->log_root;
3746 struct btrfs_file_extent_item *extent;
3747 struct btrfs_inode_item *inode_item;
3748 struct extent_buffer *src = src_path->nodes[0];
3749 struct btrfs_key first_key, last_key, key;
3751 struct btrfs_key *ins_keys;
3755 struct list_head ordered_sums;
3756 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
3757 bool has_extents = false;
3758 bool need_find_last_extent = true;
3761 INIT_LIST_HEAD(&ordered_sums);
3763 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3764 nr * sizeof(u32), GFP_NOFS);
3768 first_key.objectid = (u64)-1;
3770 ins_sizes = (u32 *)ins_data;
3771 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3773 for (i = 0; i < nr; i++) {
3774 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3775 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3777 ret = btrfs_insert_empty_items(trans, log, dst_path,
3778 ins_keys, ins_sizes, nr);
3784 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3785 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3786 dst_path->slots[0]);
3788 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3791 last_key = ins_keys[i];
3793 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3794 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3796 struct btrfs_inode_item);
3797 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3799 inode_only == LOG_INODE_EXISTS,
3802 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3803 src_offset, ins_sizes[i]);
3807 * We set need_find_last_extent here in case we know we were
3808 * processing other items and then walk into the first extent in
3809 * the inode. If we don't hit an extent then nothing changes,
3810 * we'll do the last search the next time around.
3812 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3814 if (first_key.objectid == (u64)-1)
3815 first_key = ins_keys[i];
3817 need_find_last_extent = false;
3820 /* take a reference on file data extents so that truncates
3821 * or deletes of this inode don't have to relog the inode
3824 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3827 extent = btrfs_item_ptr(src, start_slot + i,
3828 struct btrfs_file_extent_item);
3830 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3833 found_type = btrfs_file_extent_type(src, extent);
3834 if (found_type == BTRFS_FILE_EXTENT_REG) {
3836 ds = btrfs_file_extent_disk_bytenr(src,
3838 /* ds == 0 is a hole */
3842 dl = btrfs_file_extent_disk_num_bytes(src,
3844 cs = btrfs_file_extent_offset(src, extent);
3845 cl = btrfs_file_extent_num_bytes(src,
3847 if (btrfs_file_extent_compression(src,
3853 ret = btrfs_lookup_csums_range(
3855 ds + cs, ds + cs + cl - 1,
3858 btrfs_release_path(dst_path);
3866 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3867 btrfs_release_path(dst_path);
3871 * we have to do this after the loop above to avoid changing the
3872 * log tree while trying to change the log tree.
3875 while (!list_empty(&ordered_sums)) {
3876 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3877 struct btrfs_ordered_sum,
3880 ret = btrfs_csum_file_blocks(trans, log, sums);
3881 list_del(&sums->list);
3888 if (need_find_last_extent && *last_extent == first_key.offset) {
3890 * We don't have any leafs between our current one and the one
3891 * we processed before that can have file extent items for our
3892 * inode (and have a generation number smaller than our current
3895 need_find_last_extent = false;
3899 * Because we use btrfs_search_forward we could skip leaves that were
3900 * not modified and then assume *last_extent is valid when it really
3901 * isn't. So back up to the previous leaf and read the end of the last
3902 * extent before we go and fill in holes.
3904 if (need_find_last_extent) {
3907 ret = btrfs_prev_leaf(inode->root, src_path);
3912 if (src_path->slots[0])
3913 src_path->slots[0]--;
3914 src = src_path->nodes[0];
3915 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3916 if (key.objectid != btrfs_ino(inode) ||
3917 key.type != BTRFS_EXTENT_DATA_KEY)
3919 extent = btrfs_item_ptr(src, src_path->slots[0],
3920 struct btrfs_file_extent_item);
3921 if (btrfs_file_extent_type(src, extent) ==
3922 BTRFS_FILE_EXTENT_INLINE) {
3923 len = btrfs_file_extent_ram_bytes(src, extent);
3924 *last_extent = ALIGN(key.offset + len,
3925 fs_info->sectorsize);
3927 len = btrfs_file_extent_num_bytes(src, extent);
3928 *last_extent = key.offset + len;
3932 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3933 * things could have happened
3935 * 1) A merge could have happened, so we could currently be on a leaf
3936 * that holds what we were copying in the first place.
3937 * 2) A split could have happened, and now not all of the items we want
3938 * are on the same leaf.
3940 * So we need to adjust how we search for holes, we need to drop the
3941 * path and re-search for the first extent key we found, and then walk
3942 * forward until we hit the last one we copied.
3944 if (need_find_last_extent) {
3945 /* btrfs_prev_leaf could return 1 without releasing the path */
3946 btrfs_release_path(src_path);
3947 ret = btrfs_search_slot(NULL, inode->root, &first_key,
3952 src = src_path->nodes[0];
3953 i = src_path->slots[0];
3959 * Ok so here we need to go through and fill in any holes we may have
3960 * to make sure that holes are punched for those areas in case they had
3961 * extents previously.
3967 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3968 ret = btrfs_next_leaf(inode->root, src_path);
3972 src = src_path->nodes[0];
3974 need_find_last_extent = true;
3977 btrfs_item_key_to_cpu(src, &key, i);
3978 if (!btrfs_comp_cpu_keys(&key, &last_key))
3980 if (key.objectid != btrfs_ino(inode) ||
3981 key.type != BTRFS_EXTENT_DATA_KEY) {
3985 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3986 if (btrfs_file_extent_type(src, extent) ==
3987 BTRFS_FILE_EXTENT_INLINE) {
3988 len = btrfs_file_extent_ram_bytes(src, extent);
3989 extent_end = ALIGN(key.offset + len,
3990 fs_info->sectorsize);
3992 len = btrfs_file_extent_num_bytes(src, extent);
3993 extent_end = key.offset + len;
3997 if (*last_extent == key.offset) {
3998 *last_extent = extent_end;
4001 offset = *last_extent;
4002 len = key.offset - *last_extent;
4003 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
4004 offset, 0, 0, len, 0, len, 0, 0, 0);
4007 *last_extent = extent_end;
4011 * Check if there is a hole between the last extent found in our leaf
4012 * and the first extent in the next leaf. If there is one, we need to
4013 * log an explicit hole so that at replay time we can punch the hole.
4016 key.objectid == btrfs_ino(inode) &&
4017 key.type == BTRFS_EXTENT_DATA_KEY &&
4018 i == btrfs_header_nritems(src_path->nodes[0])) {
4019 ret = btrfs_next_leaf(inode->root, src_path);
4020 need_find_last_extent = true;
4023 } else if (ret == 0) {
4024 btrfs_item_key_to_cpu(src_path->nodes[0], &key,
4025 src_path->slots[0]);
4026 if (key.objectid == btrfs_ino(inode) &&
4027 key.type == BTRFS_EXTENT_DATA_KEY &&
4028 *last_extent < key.offset) {
4029 const u64 len = key.offset - *last_extent;
4031 ret = btrfs_insert_file_extent(trans, log,
4040 * Need to let the callers know we dropped the path so they should re-search.
4043 if (!ret && need_find_last_extent)
4048 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
4050 struct extent_map *em1, *em2;
4052 em1 = list_entry(a, struct extent_map, list);
4053 em2 = list_entry(b, struct extent_map, list);
4055 if (em1->start < em2->start)
4057 else if (em1->start > em2->start)
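/*
 * Copy the data checksums covering the range of the extent map 'em' from
 * the csum tree into the log tree. Holes, prealloc extents and inodes
 * flagged with NODATASUM carry no checksums and are skipped.
 */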
4062 static int log_extent_csums(struct btrfs_trans_handle *trans,
4063 struct btrfs_inode *inode,
4064 struct btrfs_root *log_root,
4065 const struct extent_map *em)
4069 LIST_HEAD(ordered_sums);
4072 if (inode->flags & BTRFS_INODE_NODATASUM ||
4073 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
4074 em->block_start == EXTENT_MAP_HOLE)
4077 /* If we're compressed we have to save the entire range of csums. */
4078 if (em->compress_type) {
4080 csum_len = max(em->block_len, em->orig_block_len);
4082 csum_offset = em->mod_start - em->start;
4083 csum_len = em->mod_len;
4086 /* block start is already adjusted for the file extent offset. */
4087 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root,
4088 em->block_start + csum_offset,
4089 em->block_start + csum_offset +
4090 csum_len - 1, &ordered_sums, 0);
4094 while (!list_empty(&ordered_sums)) {
4095 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
4096 struct btrfs_ordered_sum,
4099 ret = btrfs_csum_file_blocks(trans, log_root, sums);
4100 list_del(&sums->list);
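/*
 * Log one file extent described by the extent map 'em': copy its
 * checksums, drop any stale file extent items for the range from the log
 * and insert a fresh file extent item built from the map.
 */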
4107 static int log_one_extent(struct btrfs_trans_handle *trans,
4108 struct btrfs_inode *inode, struct btrfs_root *root,
4109 const struct extent_map *em,
4110 struct btrfs_path *path,
4111 struct btrfs_log_ctx *ctx)
4113 struct btrfs_root *log = root->log_root;
4114 struct btrfs_file_extent_item *fi;
4115 struct extent_buffer *leaf;
4116 struct btrfs_map_token token;
4117 struct btrfs_key key;
4118 u64 extent_offset = em->start - em->orig_start;
4121 int extent_inserted = 0;
4123 ret = log_extent_csums(trans, inode, log, em);
4127 btrfs_init_map_token(&token);
4129 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start,
4130 em->start + em->len, NULL, 0, 1,
4131 sizeof(*fi), &extent_inserted);
4135 if (!extent_inserted) {
4136 key.objectid = btrfs_ino(inode);
4137 key.type = BTRFS_EXTENT_DATA_KEY;
4138 key.offset = em->start;
4140 ret = btrfs_insert_empty_item(trans, log, path, &key,
4145 leaf = path->nodes[0];
4146 fi = btrfs_item_ptr(leaf, path->slots[0],
4147 struct btrfs_file_extent_item);
4149 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
4151 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
4152 btrfs_set_token_file_extent_type(leaf, fi,
4153 BTRFS_FILE_EXTENT_PREALLOC,
4156 btrfs_set_token_file_extent_type(leaf, fi,
4157 BTRFS_FILE_EXTENT_REG,
4160 block_len = max(em->block_len, em->orig_block_len);
4161 if (em->compress_type != BTRFS_COMPRESS_NONE) {
4162 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4165 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4167 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
4168 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
4170 extent_offset, &token);
4171 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
4174 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
4175 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
4179 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
4180 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
4181 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
4182 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
4184 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
4185 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
4186 btrfs_mark_buffer_dirty(leaf);
4188 btrfs_release_path(path);
4194 * Log all prealloc extents beyond the inode's i_size to make sure we do not
4195 * lose them after doing a fast fsync and replaying the log. We scan the
4196 * subvolume's root instead of iterating the inode's extent map tree because
4197 * otherwise we can log incorrect extent items based on extent map conversion.
4198 * That can happen due to the fact that extent maps are merged when they
4199 * are not in the extent map tree's list of modified extents.
4201 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4202 struct btrfs_inode *inode,
4203 struct btrfs_path *path)
4205 struct btrfs_root *root = inode->root;
4206 struct btrfs_key key;
4207 const u64 i_size = i_size_read(&inode->vfs_inode);
4208 const u64 ino = btrfs_ino(inode);
4209 struct btrfs_path *dst_path = NULL;
4210 u64 last_extent = (u64)-1;
4215 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4219 key.type = BTRFS_EXTENT_DATA_KEY;
4220 key.offset = i_size;
4221 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4226 struct extent_buffer *leaf = path->nodes[0];
4227 int slot = path->slots[0];
4229 if (slot >= btrfs_header_nritems(leaf)) {
4231 ret = copy_items(trans, inode, dst_path, path,
4232 &last_extent, start_slot,
4238 ret = btrfs_next_leaf(root, path);
4248 btrfs_item_key_to_cpu(leaf, &key, slot);
4249 if (key.objectid > ino)
4251 if (WARN_ON_ONCE(key.objectid < ino) ||
4252 key.type < BTRFS_EXTENT_DATA_KEY ||
4253 key.offset < i_size) {
4257 if (last_extent == (u64)-1) {
4258 last_extent = key.offset;
4260 * Avoid logging extent items logged in past fsync calls
4261 * and leading to duplicate keys in the log tree.
4264 ret = btrfs_truncate_inode_items(trans,
4268 BTRFS_EXTENT_DATA_KEY);
4269 } while (ret == -EAGAIN);
4278 dst_path = btrfs_alloc_path();
4286 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4287 start_slot, ins_nr, 1, 0);
4292 btrfs_release_path(path);
4293 btrfs_free_path(dst_path);
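/*
 * Log every extent map modified in the current transaction for this
 * inode: collect them from the tree's modified list, sort them by file
 * offset and log them one by one. Prealloc extents beyond i_size are
 * handled separately by btrfs_log_prealloc_extents().
 */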
4297 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4298 struct btrfs_root *root,
4299 struct btrfs_inode *inode,
4300 struct btrfs_path *path,
4301 struct btrfs_log_ctx *ctx,
4305 struct extent_map *em, *n;
4306 struct list_head extents;
4307 struct extent_map_tree *tree = &inode->extent_tree;
4308 u64 logged_start, logged_end;
4313 INIT_LIST_HEAD(&extents);
4315 down_write(&inode->dio_sem);
4316 write_lock(&tree->lock);
4317 test_gen = root->fs_info->last_trans_committed;
4318 logged_start = start;
4321 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4322 list_del_init(&em->list);
4324 * Just an arbitrary number, this can be really CPU intensive
4325 * once we start getting a lot of extents, and really once we
4326 * have a bunch of extents we just want to commit since it will
4329 if (++num > 32768) {
4330 list_del_init(&tree->modified_extents);
4335 if (em->generation <= test_gen)
4338 /* We log prealloc extents beyond eof later. */
4339 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4340 em->start >= i_size_read(&inode->vfs_inode))
4343 if (em->start < logged_start)
4344 logged_start = em->start;
4345 if ((em->start + em->len - 1) > logged_end)
4346 logged_end = em->start + em->len - 1;
4348 /* Need a ref to keep it from getting evicted from cache */
4349 refcount_inc(&em->refs);
4350 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4351 list_add_tail(&em->list, &extents);
4355 list_sort(NULL, &extents, extent_cmp);
4357 while (!list_empty(&extents)) {
4358 em = list_entry(extents.next, struct extent_map, list);
4360 list_del_init(&em->list);
4363 * If we had an error we just need to delete everybody from our
4367 clear_em_logging(tree, em);
4368 free_extent_map(em);
4372 write_unlock(&tree->lock);
4374 ret = log_one_extent(trans, inode, root, em, path, ctx);
4375 write_lock(&tree->lock);
4376 clear_em_logging(tree, em);
4377 free_extent_map(em);
4379 WARN_ON(!list_empty(&extents));
4380 write_unlock(&tree->lock);
4381 up_write(&inode->dio_sem);
4383 btrfs_release_path(path);
4385 ret = btrfs_log_prealloc_extents(trans, inode, path);
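/*
 * Look up the inode item for this inode in the log tree and return the
 * size recorded there in *size_ret (0 if the inode was not logged yet).
 */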
4390 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4391 struct btrfs_path *path, u64 *size_ret)
4393 struct btrfs_key key;
4396 key.objectid = btrfs_ino(inode);
4397 key.type = BTRFS_INODE_ITEM_KEY;
4400 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4403 } else if (ret > 0) {
4406 struct btrfs_inode_item *item;
4408 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4409 struct btrfs_inode_item);
4410 *size_ret = btrfs_inode_size(path->nodes[0], item);
4413 btrfs_release_path(path);
4418 * At the moment we always log all xattrs. This is to figure out at log replay
4419 * time which xattrs must have their deletion replayed. If an xattr is missing
4420 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4421 * because if an xattr is deleted, the inode is fsynced and a power failure
4422 * happens, causing the log to be replayed the next time the fs is mounted,
4423 * we want the xattr to not exist anymore (same behaviour as other filesystems
4424 * with a journal, ext3/4, xfs, f2fs, etc).
4426 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4427 struct btrfs_root *root,
4428 struct btrfs_inode *inode,
4429 struct btrfs_path *path,
4430 struct btrfs_path *dst_path)
4433 struct btrfs_key key;
4434 const u64 ino = btrfs_ino(inode);
4439 key.type = BTRFS_XATTR_ITEM_KEY;
4442 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4447 int slot = path->slots[0];
4448 struct extent_buffer *leaf = path->nodes[0];
4449 int nritems = btrfs_header_nritems(leaf);
4451 if (slot >= nritems) {
4453 u64 last_extent = 0;
4455 ret = copy_items(trans, inode, dst_path, path,
4456 &last_extent, start_slot,
4458 /* can't be 1, extent items aren't processed */
4464 ret = btrfs_next_leaf(root, path);
4472 btrfs_item_key_to_cpu(leaf, &key, slot);
4473 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4483 u64 last_extent = 0;
4485 ret = copy_items(trans, inode, dst_path, path,
4486 &last_extent, start_slot,
4488 /* can't be 1, extent items aren't processed */
4498 * If the no holes feature is enabled we need to make sure any hole between the
4499 * last extent and the i_size of our inode is explicitly marked in the log. This
4500 * is to make sure that doing something like:
4502 * 1) create file with 128Kb of data
4503 * 2) truncate file to 64Kb
4504 * 3) truncate file to 256Kb
4506 * 5) <crash/power failure>
4507 * 6) mount fs and trigger log replay
4509 * Will give us a file with a size of 256Kb, the first 64Kb of data match what
4510 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the
4511 * file correspond to a hole. The presence of explicit holes in a log tree is
4512 * what guarantees that log replay will remove/adjust file extent items in the
4515 * Here we do not need to care about holes between extents; that is already done
4516 * by copy_items(). We also only need to do this in the full sync path, where we
4517 * lookup for extents from the fs/subvol tree only. In the fast path case, we
4518 * lookup the list of modified extent maps and if any represents a hole, we
4519 * insert a corresponding extent representing a hole in the log tree.
4521 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
4522 struct btrfs_root *root,
4523 struct btrfs_inode *inode,
4524 struct btrfs_path *path)
4526 struct btrfs_fs_info *fs_info = root->fs_info;
4528 struct btrfs_key key;
4531 struct extent_buffer *leaf;
4532 struct btrfs_root *log = root->log_root;
4533 const u64 ino = btrfs_ino(inode);
4534 const u64 i_size = i_size_read(&inode->vfs_inode);
4536 if (!btrfs_fs_incompat(fs_info, NO_HOLES))
4540 key.type = BTRFS_EXTENT_DATA_KEY;
4541 key.offset = (u64)-1;
4543 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4548 ASSERT(path->slots[0] > 0);
4550 leaf = path->nodes[0];
4551 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4553 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) {
4554 /* inode does not have any extents */
4558 struct btrfs_file_extent_item *extent;
4562 * If there's an extent beyond i_size, an explicit hole was
4563 * already inserted by copy_items().
4565 if (key.offset >= i_size)
4568 extent = btrfs_item_ptr(leaf, path->slots[0],
4569 struct btrfs_file_extent_item);
4571 if (btrfs_file_extent_type(leaf, extent) ==
4572 BTRFS_FILE_EXTENT_INLINE) {
4573 len = btrfs_file_extent_ram_bytes(leaf, extent);
4574 ASSERT(len == i_size ||
4575 (len == fs_info->sectorsize &&
4576 btrfs_file_extent_compression(leaf, extent) !=
4577 BTRFS_COMPRESS_NONE));
4581 len = btrfs_file_extent_num_bytes(leaf, extent);
4582 /* Last extent goes beyond i_size, no need to log a hole. */
4583 if (key.offset + len > i_size)
4585 hole_start = key.offset + len;
4586 hole_size = i_size - hole_start;
4588 btrfs_release_path(path);
4590 /* Last extent ends at i_size. */
4594 hole_size = ALIGN(hole_size, fs_info->sectorsize);
4595 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0,
4596 hole_size, 0, hole_size, 0, 0, 0);
4601 * When we are logging a new inode X, check whether it has a reference that
4602 * matches a reference from some other inode Y created in a past transaction
4603 * and that was renamed in the current transaction. If we don't do this, then at
4604 * log replay time we can lose inode Y (and all its files if it's a directory):
4607 * echo "hello world" > /mnt/x/foobar
4610 * mkdir /mnt/x # or touch /mnt/x
4611 * xfs_io -c fsync /mnt/x
4613 * mount fs, trigger log replay
4615 * After the log replay procedure, we would lose the first directory and all its
4616 * files (file foobar).
4617 * For the case where inode Y is not a directory we simply end up losing it:
4619 * echo "123" > /mnt/foo
4621 * mv /mnt/foo /mnt/bar
4622 * echo "abc" > /mnt/foo
4623 * xfs_io -c fsync /mnt/foo
4626 * We also need this for cases where a snapshot entry is replaced by some other
4627 * entry (file or directory) otherwise we end up with an unreplayable log due to
4628 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4629 * if it were a regular entry:
4632 * btrfs subvolume snapshot /mnt /mnt/x/snap
4633 * btrfs subvolume delete /mnt/x/snap
4636 * fsync /mnt/x or fsync some new file inside it
4639 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4640 * the same transaction.
4642 static int btrfs_check_ref_name_override(struct extent_buffer *eb,
4644 const struct btrfs_key *key,
4645 struct btrfs_inode *inode,
4649 struct btrfs_path *search_path;
4652 u32 item_size = btrfs_item_size_nr(eb, slot);
4654 unsigned long ptr = btrfs_item_ptr_offset(eb, slot);
4656 search_path = btrfs_alloc_path();
4659 search_path->search_commit_root = 1;
4660 search_path->skip_locking = 1;
4662 while (cur_offset < item_size) {
4666 unsigned long name_ptr;
4667 struct btrfs_dir_item *di;
4669 if (key->type == BTRFS_INODE_REF_KEY) {
4670 struct btrfs_inode_ref *iref;
4672 iref = (struct btrfs_inode_ref *)(ptr + cur_offset);
4673 parent = key->offset;
4674 this_name_len = btrfs_inode_ref_name_len(eb, iref);
4675 name_ptr = (unsigned long)(iref + 1);
4676 this_len = sizeof(*iref) + this_name_len;
4678 struct btrfs_inode_extref *extref;
4680 extref = (struct btrfs_inode_extref *)(ptr +
4682 parent = btrfs_inode_extref_parent(eb, extref);
4683 this_name_len = btrfs_inode_extref_name_len(eb, extref);
4684 name_ptr = (unsigned long)&extref->name;
4685 this_len = sizeof(*extref) + this_name_len;
4688 if (this_name_len > name_len) {
4691 new_name = krealloc(name, this_name_len, GFP_NOFS);
4696 name_len = this_name_len;
4700 read_extent_buffer(eb, name, name_ptr, this_name_len);
4701 di = btrfs_lookup_dir_item(NULL, inode->root, search_path,
4702 parent, name, this_name_len, 0);
4703 if (di && !IS_ERR(di)) {
4704 struct btrfs_key di_key;
4706 btrfs_dir_item_key_to_cpu(search_path->nodes[0],
4708 if (di_key.type == BTRFS_INODE_ITEM_KEY) {
4710 *other_ino = di_key.objectid;
4715 } else if (IS_ERR(di)) {
4719 btrfs_release_path(search_path);
4721 cur_offset += this_len;
4725 btrfs_free_path(search_path);
4730 /* log a single inode in the tree log.
4731 * At least one parent directory for this inode must exist in the tree
4732 * or be logged already.
4734 * Any items from this inode changed by the current transaction are copied
4735 * to the log tree. An extra reference is taken on any extents in this
4736 * file, allowing us to avoid a whole pile of corner cases around logging
4737 * blocks that have been removed from the tree.
4739 * See LOG_INODE_ALL and related defines for a description of what inode_only
4742 * This handles both files and directories.
4744 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
4745 struct btrfs_root *root, struct btrfs_inode *inode,
4749 struct btrfs_log_ctx *ctx)
4751 struct btrfs_fs_info *fs_info = root->fs_info;
4752 struct btrfs_path *path;
4753 struct btrfs_path *dst_path;
4754 struct btrfs_key min_key;
4755 struct btrfs_key max_key;
4756 struct btrfs_root *log = root->log_root;
4757 u64 last_extent = 0;
4761 int ins_start_slot = 0;
4763 bool fast_search = false;
4764 u64 ino = btrfs_ino(inode);
4765 struct extent_map_tree *em_tree = &inode->extent_tree;
4766 u64 logged_isize = 0;
4767 bool need_log_inode_item = true;
4768 bool xattrs_logged = false;
4770 path = btrfs_alloc_path();
4773 dst_path = btrfs_alloc_path();
4775 btrfs_free_path(path);
4779 min_key.objectid = ino;
4780 min_key.type = BTRFS_INODE_ITEM_KEY;
4783 max_key.objectid = ino;
4786 /* today the code can only do partial logging of directories */
4787 if (S_ISDIR(inode->vfs_inode.i_mode) ||
4788 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4789 &inode->runtime_flags) &&
4790 inode_only >= LOG_INODE_EXISTS))
4791 max_key.type = BTRFS_XATTR_ITEM_KEY;
4793 max_key.type = (u8)-1;
4794 max_key.offset = (u64)-1;
4797 * Only run delayed items if we are a dir or a new file.
4798 * Otherwise commit the delayed inode only, which is needed in
4799 * order for the log replay code to mark inodes for link count
4800 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4802 if (S_ISDIR(inode->vfs_inode.i_mode) ||
4803 inode->generation > fs_info->last_trans_committed)
4804 ret = btrfs_commit_inode_delayed_items(trans, inode);
4806 ret = btrfs_commit_inode_delayed_inode(inode);
4809 btrfs_free_path(path);
4810 btrfs_free_path(dst_path);
4814 if (inode_only == LOG_OTHER_INODE) {
4815 inode_only = LOG_INODE_EXISTS;
4816 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
4818 mutex_lock(&inode->log_mutex);
4822 * a brute force approach to making sure we get the most up-to-date
4823 * copies of everything.
4825 if (S_ISDIR(inode->vfs_inode.i_mode)) {
4826 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4828 if (inode_only == LOG_INODE_EXISTS)
4829 max_key_type = BTRFS_XATTR_ITEM_KEY;
4830 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
4832 if (inode_only == LOG_INODE_EXISTS) {
4834 * Make sure the new inode item we write to the log has
4835 * the same isize as the current one (if it exists).
4836 * This is necessary to prevent data loss after log
4837 * replay, and also to prevent doing a wrong expanding
4838 * truncate - for e.g. create file, write 4K into offset
4839 * 0, fsync, write 4K into offset 4096, add hard link,
4840 * fsync some other file (to sync log), power fail - if
4841 * we use the inode's current i_size, after log replay
4842 * we get a 8Kb file, with the last 4Kb extent as a hole
4843 * (zeroes), as if an expanding truncate happened,
4844 * instead of getting a file of 4Kb only.
4846 err = logged_inode_size(log, inode, path, &logged_isize);
4850 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4851 &inode->runtime_flags)) {
4852 if (inode_only == LOG_INODE_EXISTS) {
4853 max_key.type = BTRFS_XATTR_ITEM_KEY;
4854 ret = drop_objectid_items(trans, log, path, ino,
4857 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4858 &inode->runtime_flags);
4859 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4860 &inode->runtime_flags);
4862 ret = btrfs_truncate_inode_items(trans,
4863 log, &inode->vfs_inode, 0, 0);
4868 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4869 &inode->runtime_flags) ||
4870 inode_only == LOG_INODE_EXISTS) {
4871 if (inode_only == LOG_INODE_ALL)
4873 max_key.type = BTRFS_XATTR_ITEM_KEY;
4874 ret = drop_objectid_items(trans, log, path, ino,
4877 if (inode_only == LOG_INODE_ALL)
4890 ret = btrfs_search_forward(root, &min_key,
4891 path, trans->transid);
4899 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4900 if (min_key.objectid != ino)
4902 if (min_key.type > max_key.type)
4905 if (min_key.type == BTRFS_INODE_ITEM_KEY)
4906 need_log_inode_item = false;
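/*
 * The inode item is part of the batch we are about to copy, so there
 * is no need to log it separately with log_inode_item() at the end of
 * this function.
 */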
4908 if ((min_key.type == BTRFS_INODE_REF_KEY ||
4909 min_key.type == BTRFS_INODE_EXTREF_KEY) &&
4910 inode->generation == trans->transid) {
4913 ret = btrfs_check_ref_name_override(path->nodes[0],
4914 path->slots[0], &min_key, inode,
4919 } else if (ret > 0 && ctx &&
4920 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
4921 struct btrfs_key inode_key;
4922 struct inode *other_inode;
4928 ins_start_slot = path->slots[0];
4930 ret = copy_items(trans, inode, dst_path, path,
4931 &last_extent, ins_start_slot,
4939 btrfs_release_path(path);
4940 inode_key.objectid = other_ino;
4941 inode_key.type = BTRFS_INODE_ITEM_KEY;
4942 inode_key.offset = 0;
4943 other_inode = btrfs_iget(fs_info->sb,
4947 * If the other inode that had a conflicting dir
4948 * entry was deleted in the current transaction,
4949 * we don't need to do more work nor fall back to
4950 * a transaction commit.
4952 if (IS_ERR(other_inode) &&
4953 PTR_ERR(other_inode) == -ENOENT) {
4955 } else if (IS_ERR(other_inode)) {
4956 err = PTR_ERR(other_inode);
4960 * We are safe logging the other inode without
4961 * acquiring its i_mutex as long as we log with
4962 * the LOG_INODE_EXISTS mode. We're safe against
4963 * concurrent renames of the other inode as well
4964 * because during a rename we pin the log and
4965 * update the log with the new name before we unpin it.
4968 err = btrfs_log_inode(trans, root,
4969 BTRFS_I(other_inode),
4970 LOG_OTHER_INODE, 0, LLONG_MAX,
4980 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
4981 if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
4984 ret = copy_items(trans, inode, dst_path, path,
4985 &last_extent, ins_start_slot,
4986 ins_nr, inode_only, logged_isize);
4993 btrfs_release_path(path);
4999 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5002 } else if (!ins_nr) {
5003 ins_start_slot = path->slots[0];
5008 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5009 ins_start_slot, ins_nr, inode_only,
5017 btrfs_release_path(path);
5021 ins_start_slot = path->slots[0];
5024 nritems = btrfs_header_nritems(path->nodes[0]);
5026 if (path->slots[0] < nritems) {
5027 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
5032 ret = copy_items(trans, inode, dst_path, path,
5033 &last_extent, ins_start_slot,
5034 ins_nr, inode_only, logged_isize);
5042 btrfs_release_path(path);
5044 if (min_key.offset < (u64)-1) {
5046 } else if (min_key.type < max_key.type) {
5054 ret = copy_items(trans, inode, dst_path, path, &last_extent,
5055 ins_start_slot, ins_nr, inode_only,
5065 btrfs_release_path(path);
5066 btrfs_release_path(dst_path);
5067 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5070 xattrs_logged = true;
5071 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5072 btrfs_release_path(path);
5073 btrfs_release_path(dst_path);
5074 err = btrfs_log_trailing_hole(trans, root, inode, path);
5079 btrfs_release_path(path);
5080 btrfs_release_path(dst_path);
5081 if (need_log_inode_item) {
5082 err = log_inode_item(trans, log, dst_path, inode);
5083 if (!err && !xattrs_logged) {
5084 err = btrfs_log_all_xattrs(trans, root, inode, path,
5086 btrfs_release_path(path);
5092 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
5098 } else if (inode_only == LOG_INODE_ALL) {
5099 struct extent_map *em, *n;
5101 write_lock(&em_tree->lock);
5103 * We can't just remove every em if we're called for a ranged
5104 * fsync - that is, one that doesn't cover the whole possible
5105 * file range (0 to LLONG_MAX). This is because we can have
5106 * em's that fall outside the range we're logging and therefore
5107 * their ordered operations haven't completed yet
5108 * (btrfs_finish_ordered_io() not invoked yet). This means we
5109 * didn't get their respective file extent item in the fs/subvol
5110 * tree yet, and need to let the next fast fsync (one which
5111 * consults the list of modified extent maps) find the em so
5112 * that it logs a matching file extent item and waits for the
5113 * respective ordered operation to complete (if it's still running).
5116 * Removing every em outside the range we're logging would make
5117 * the next fast fsync not log their matching file extent items,
5118 * therefore making us lose data after a log replay.
5120 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
5122 const u64 mod_end = em->mod_start + em->mod_len - 1;
5124 if (em->mod_start >= start && mod_end <= end)
5125 list_del_init(&em->list);
5127 write_unlock(&em_tree->lock);
5130 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) {
5131 ret = log_directory_changes(trans, root, inode, path, dst_path,
5139 spin_lock(&inode->lock);
5140 inode->logged_trans = trans->transid;
5141 inode->last_log_commit = inode->last_sub_trans;
5142 spin_unlock(&inode->lock);
5144 mutex_unlock(&inode->log_mutex);
5146 btrfs_free_path(path);
5147 btrfs_free_path(dst_path);
5152 * Check if we must fallback to a transaction commit when logging an inode.
5153 * This must be called after logging the inode and is used only in the context
5154 * when fsyncing an inode requires logging some other inode - in which
5155 * case we can't lock the i_mutex of each other inode we need to log as that
5156 * can lead to deadlocks with concurrent fsync against other inodes (as we can
5157 * log inodes up or down in the hierarchy) or rename operations for example. So
5158 * we take the log_mutex of the inode after we have logged it and then check for
5159 * its last_unlink_trans value - this is safe because any task setting
5160 * last_unlink_trans must take the log_mutex and it must do this before it does
5161 * the actual unlink operation, so if we do this check before a concurrent task
5162 * sets last_unlink_trans it means we've logged a consistent version/state of
5163 * all the inode items, otherwise we are not sure and must do a transaction
5164 * commit (the concurrent task might have only updated last_unlink_trans before
5165 * we logged the inode or it might have also done the unlink).
5167 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans,
5168 struct btrfs_inode *inode)
5170 struct btrfs_fs_info *fs_info = inode->root->fs_info;
5173 mutex_lock(&inode->log_mutex);
5174 if (inode->last_unlink_trans > fs_info->last_trans_committed) {
5176 * Make sure any commits to the log are forced to be full
5179 btrfs_set_log_full_commit(fs_info, trans);
5182 mutex_unlock(&inode->log_mutex);
5188 * follow the dentry parent pointers up the chain and see if any
5189 * of the directories in it require a full commit before they can
5190 * be logged. Returns zero if nothing special needs to be done or 1 if
5191 * a full commit is required.
5193 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
5194 struct btrfs_inode *inode,
5195 struct dentry *parent,
5196 struct super_block *sb,
5200 struct dentry *old_parent = NULL;
5201 struct btrfs_inode *orig_inode = inode;
5204 * for regular files, if its inode is already on disk, we don't
5205 * have to worry about the parents at all. This is because
5206 * we can use the last_unlink_trans field to record renames
5207 * and other fun in this file.
5209 if (S_ISREG(inode->vfs_inode.i_mode) &&
5210 inode->generation <= last_committed &&
5211 inode->last_unlink_trans <= last_committed)
5214 if (!S_ISDIR(inode->vfs_inode.i_mode)) {
5215 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5217 inode = BTRFS_I(d_inode(parent));
5222 * If we are logging a directory then we start with our inode,
5223 * not our parent's inode, so we need to skip setting the
5224 * logged_trans so that further down in the log code we don't
5225 * think this inode has already been logged.
5227 if (inode != orig_inode)
5228 inode->logged_trans = trans->transid;
5231 if (btrfs_must_commit_transaction(trans, inode)) {
5236 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5239 if (IS_ROOT(parent)) {
5240 inode = BTRFS_I(d_inode(parent));
5241 if (btrfs_must_commit_transaction(trans, inode))
5246 parent = dget_parent(parent);
5248 old_parent = parent;
5249 inode = BTRFS_I(d_inode(parent));
5257 struct btrfs_dir_list {
5259 struct list_head list;
5263 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5264 * details about why it is needed.
5265 * This is a recursive operation - if an existing dentry corresponds to a
5266 * directory, that directory's new entries are logged too (same behaviour as
5267 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5268 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5269 * complains about the following circular lock dependency / possible deadlock:
5273 * lock(&type->i_mutex_dir_key#3/2);
5274 * lock(sb_internal#2);
5275 * lock(&type->i_mutex_dir_key#3/2);
5276 * lock(&sb->s_type->i_mutex_key#14);
5278 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5279 * sb_start_intwrite() in btrfs_start_transaction().
5280 * Not locking i_mutex of the inodes is still safe because:
5282 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5283 * that while logging the inode new references (names) are added or removed
5284 * from the inode, leaving the logged inode item with a link count that does
5285 * not match the number of logged inode reference items. This is fine because
5286 * at log replay time we compute the real number of links and correct the
5287 * link count in the inode item (see replay_one_buffer() and
5288 * link_to_fixup_dir());
5290 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5291 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5292 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5293 * has a size that doesn't match the sum of the lengths of all the logged
5294 * names. This does not result in a problem because if a dir_item key is
5295 * logged but its matching dir_index key is not logged, at log replay time we
5296 * don't use it to replay the respective name (see replay_one_name()). On the
5297 * other hand if only the dir_index key ends up being logged, the respective
5298 * name is added to the fs/subvol tree with both the dir_item and dir_index
5299 * keys created (see replay_one_name()).
5300 * The directory's inode item with a wrong i_size is not a problem as well,
5301 * since we don't use it at log replay time to set the i_size in the inode
5302 * item of the fs/subvol tree (see overwrite_item()).
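/*
 * A minimal userspace sketch of the behaviour this provides
 * (illustrative only; paths are made up, error handling omitted):
 *
 *   mkdir("/mnt/dir", 0755);
 *   int fd = open("/mnt/dir/file", O_CREAT | O_RDWR, 0644);
 *   int dirfd = open("/mnt/dir", O_RDONLY | O_DIRECTORY);
 *   fsync(dirfd);
 *   // <power failure>
 *
 * Logging "dir" records its new dentry "file", so the inode that dentry
 * points to must be logged as well (which is what this function does),
 * otherwise after log replay the directory could end up referencing an
 * inode that was never replayed.
 */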
5304 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5305 struct btrfs_root *root,
5306 struct btrfs_inode *start_inode,
5307 struct btrfs_log_ctx *ctx)
5309 struct btrfs_fs_info *fs_info = root->fs_info;
5310 struct btrfs_root *log = root->log_root;
5311 struct btrfs_path *path;
5312 LIST_HEAD(dir_list);
5313 struct btrfs_dir_list *dir_elem;
5316 path = btrfs_alloc_path();
5320 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5322 btrfs_free_path(path);
5325 dir_elem->ino = btrfs_ino(start_inode);
5326 list_add_tail(&dir_elem->list, &dir_list);
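/*
 * dir_list acts as a FIFO work queue: we start with start_inode and,
 * whenever a dentry we log turns out to be a directory with new
 * dentries of its own, it is appended to the tail below, giving a
 * breadth-first walk of the newly created directory tree.
 */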
5328 while (!list_empty(&dir_list)) {
5329 struct extent_buffer *leaf;
5330 struct btrfs_key min_key;
5334 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5337 goto next_dir_inode;
5339 min_key.objectid = dir_elem->ino;
5340 min_key.type = BTRFS_DIR_ITEM_KEY;
5343 btrfs_release_path(path);
5344 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5346 goto next_dir_inode;
5347 } else if (ret > 0) {
5349 goto next_dir_inode;
5353 leaf = path->nodes[0];
5354 nritems = btrfs_header_nritems(leaf);
5355 for (i = path->slots[0]; i < nritems; i++) {
5356 struct btrfs_dir_item *di;
5357 struct btrfs_key di_key;
5358 struct inode *di_inode;
5359 struct btrfs_dir_list *new_dir_elem;
5360 int log_mode = LOG_INODE_EXISTS;
5363 btrfs_item_key_to_cpu(leaf, &min_key, i);
5364 if (min_key.objectid != dir_elem->ino ||
5365 min_key.type != BTRFS_DIR_ITEM_KEY)
5366 goto next_dir_inode;
5368 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5369 type = btrfs_dir_type(leaf, di);
5370 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5371 type != BTRFS_FT_DIR)
5373 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5374 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5377 btrfs_release_path(path);
5378 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL);
5379 if (IS_ERR(di_inode)) {
5380 ret = PTR_ERR(di_inode);
5381 goto next_dir_inode;
5384 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5389 ctx->log_new_dentries = false;
5390 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5391 log_mode = LOG_INODE_ALL;
5392 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5393 log_mode, 0, LLONG_MAX, ctx);
5395 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5399 goto next_dir_inode;
5400 if (ctx->log_new_dentries) {
5401 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5403 if (!new_dir_elem) {
5405 goto next_dir_inode;
5407 new_dir_elem->ino = di_key.objectid;
5408 list_add_tail(&new_dir_elem->list, &dir_list);
5413 ret = btrfs_next_leaf(log, path);
5415 goto next_dir_inode;
5416 } else if (ret > 0) {
5418 goto next_dir_inode;
5422 if (min_key.offset < (u64)-1) {
5427 list_del(&dir_elem->list);
5431 btrfs_free_path(path);
5435 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5436 struct btrfs_inode *inode,
5437 struct btrfs_log_ctx *ctx)
5439 struct btrfs_fs_info *fs_info = trans->fs_info;
5441 struct btrfs_path *path;
5442 struct btrfs_key key;
5443 struct btrfs_root *root = inode->root;
5444 const u64 ino = btrfs_ino(inode);
5446 path = btrfs_alloc_path();
5449 path->skip_locking = 1;
5450 path->search_commit_root = 1;
5453 key.type = BTRFS_INODE_REF_KEY;
5455 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5460 struct extent_buffer *leaf = path->nodes[0];
5461 int slot = path->slots[0];
5466 if (slot >= btrfs_header_nritems(leaf)) {
5467 ret = btrfs_next_leaf(root, path);
5475 btrfs_item_key_to_cpu(leaf, &key, slot);
5476 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
5477 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
5480 item_size = btrfs_item_size_nr(leaf, slot);
5481 ptr = btrfs_item_ptr_offset(leaf, slot);
5482 while (cur_offset < item_size) {
5483 struct btrfs_key inode_key;
5484 struct inode *dir_inode;
5486 inode_key.type = BTRFS_INODE_ITEM_KEY;
5487 inode_key.offset = 0;
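/*
 * An extref item packs one or more variable-length records, each one
 * carrying the objectid of its parent directory, so we advance
 * cur_offset record by record. A plain ref item keeps the parent
 * directory in the key's offset field instead, so one lookup covers
 * the whole item and we can jump straight to item_size.
 */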
5489 if (key.type == BTRFS_INODE_EXTREF_KEY) {
5490 struct btrfs_inode_extref *extref;
5492 extref = (struct btrfs_inode_extref *)
5494 inode_key.objectid = btrfs_inode_extref_parent(
5496 cur_offset += sizeof(*extref);
5497 cur_offset += btrfs_inode_extref_name_len(leaf,
5500 inode_key.objectid = key.offset;
5501 cur_offset = item_size;
5504 dir_inode = btrfs_iget(fs_info->sb, &inode_key,
5506 /* If parent inode was deleted, skip it. */
5507 if (IS_ERR(dir_inode))
5511 ctx->log_new_dentries = false;
5512 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
5513 LOG_INODE_ALL, 0, LLONG_MAX, ctx);
5515 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode)))
5517 if (!ret && ctx && ctx->log_new_dentries)
5518 ret = log_new_dir_dentries(trans, root,
5519 BTRFS_I(dir_inode), ctx);
5528 btrfs_free_path(path);
5533 * helper function around btrfs_log_inode to make sure newly created
5534 * parent directories also end up in the log. A minimal, inode-and-backref-only
5535 * logging pass is done for any parent directory that is older than
5536 * the last committed transaction.
5538 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
5539 struct btrfs_inode *inode,
5540 struct dentry *parent,
5544 struct btrfs_log_ctx *ctx)
5546 struct btrfs_root *root = inode->root;
5547 struct btrfs_fs_info *fs_info = root->fs_info;
5548 struct super_block *sb;
5549 struct dentry *old_parent = NULL;
5551 u64 last_committed = fs_info->last_trans_committed;
5552 bool log_dentries = false;
5553 struct btrfs_inode *orig_inode = inode;
5555 sb = inode->vfs_inode.i_sb;
5557 if (btrfs_test_opt(fs_info, NOTREELOG)) {
5563 * If the previous transaction commit didn't complete, we have to do a
5564 * full commit ourselves.
5566 if (fs_info->last_trans_log_full_commit >
5567 fs_info->last_trans_committed) {
5572 if (btrfs_root_refs(&root->root_item) == 0) {
5577 ret = check_parent_dirs_for_sync(trans, inode, parent, sb,
5582 if (btrfs_inode_in_log(inode, trans->transid)) {
5583 ret = BTRFS_NO_LOG_SYNC;
5587 ret = start_log_trans(trans, root, ctx);
5591 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
5596 * for regular files, if its inode is already on disk, we don't
5597 * have to worry about the parents at all. This is because
5598 * we can use the last_unlink_trans field to record renames
5599 * and other fun in this file.
5601 if (S_ISREG(inode->vfs_inode.i_mode) &&
5602 inode->generation <= last_committed &&
5603 inode->last_unlink_trans <= last_committed) {
5608 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
5609 log_dentries = true;
5612 * On unlink we must make sure all our current and old parent directory
5613 * inodes are fully logged. This is to prevent leaving dangling
5614 * directory index entries in directories that were our parents but are
5615 * not anymore. Not doing this results in the old parent directory being
5616 * impossible to delete after log replay (rmdir will always fail with
5617 * error -ENOTEMPTY).
5623 * ln testdir/foo testdir/bar
5625 * unlink testdir/bar
5626 * xfs_io -c fsync testdir/foo
5628 * mount fs, triggers log replay
5630 * If we don't log the parent directory (testdir), after log replay the
5631 * directory still has an entry pointing to the file inode using the bar
5632 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
5633 * the file inode has a link count of 1.
5639 * ln foo testdir/foo2
5640 * ln foo testdir/foo3
5642 * unlink testdir/foo3
5643 * xfs_io -c fsync foo
5645 * mount fs, triggers log replay
5647 * Similar to the first example, after log replay the parent directory
5648 * testdir still has an entry pointing to the inode file with name foo3
5649 * but the file inode does not have a matching BTRFS_INODE_REF_KEY item
5650 * and has a link count of 2.
5652 if (inode->last_unlink_trans > last_committed) {
5653 ret = btrfs_log_all_parents(trans, orig_inode, ctx);
5659 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
5662 inode = BTRFS_I(d_inode(parent));
5663 if (root != inode->root)
5666 if (inode->generation > last_committed) {
5667 ret = btrfs_log_inode(trans, root, inode,
5668 LOG_INODE_EXISTS, 0, LLONG_MAX, ctx);
5672 if (IS_ROOT(parent))
5675 parent = dget_parent(parent);
5677 old_parent = parent;
5680 ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
5686 btrfs_set_log_full_commit(fs_info, trans);
5691 btrfs_remove_log_ctx(root, ctx);
5692 btrfs_end_log_trans(root);
5698 * it is not safe to log the dentry if the chunk root has added new
5699 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
5700 * If this returns 1, you must commit the transaction to safely get your data on disk.
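/*
 * A rough sketch (not a copy of the real caller) of how an fsync path
 * such as btrfs_sync_file() is expected to consume the return value:
 *
 *   ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
 *   if (ret == 0)
 *           // the dentry made it into the log, syncing the log tree
 *           // is enough to make the fsync durable
 *   else if (ret == BTRFS_NO_LOG_SYNC)
 *           // nothing was logged, no log sync is needed
 *   else
 *           // logging was not possible or not safe: fall back to a
 *           // full transaction commit to get the data on disk
 */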
5703 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
5704 struct dentry *dentry,
5707 struct btrfs_log_ctx *ctx)
5709 struct dentry *parent = dget_parent(dentry);
5712 ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
5713 start, end, LOG_INODE_ALL, ctx);
5720 * should be called during mount to recover and replay any log trees
5723 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
5726 struct btrfs_path *path;
5727 struct btrfs_trans_handle *trans;
5728 struct btrfs_key key;
5729 struct btrfs_key found_key;
5730 struct btrfs_key tmp_key;
5731 struct btrfs_root *log;
5732 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
5733 struct walk_control wc = {
5734 .process_func = process_one_buffer,
5738 path = btrfs_alloc_path();
5742 set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5744 trans = btrfs_start_transaction(fs_info->tree_root, 0);
5745 if (IS_ERR(trans)) {
5746 ret = PTR_ERR(trans);
5753 ret = walk_log_tree(trans, log_root_tree, &wc);
5755 btrfs_handle_fs_error(fs_info, ret,
5756 "Failed to pin buffers while recovering log root tree.");
5761 key.objectid = BTRFS_TREE_LOG_OBJECTID;
5762 key.offset = (u64)-1;
5763 key.type = BTRFS_ROOT_ITEM_KEY;
5766 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
5769 btrfs_handle_fs_error(fs_info, ret,
5770 "Couldn't find tree log root.");
5774 if (path->slots[0] == 0)
5778 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
5780 btrfs_release_path(path);
5781 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
5784 log = btrfs_read_fs_root(log_root_tree, &found_key);
5787 btrfs_handle_fs_error(fs_info, ret,
5788 "Couldn't read tree log root.");
5792 tmp_key.objectid = found_key.offset;
5793 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
5794 tmp_key.offset = (u64)-1;
5796 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
5797 if (IS_ERR(wc.replay_dest)) {
5798 ret = PTR_ERR(wc.replay_dest);
5799 free_extent_buffer(log->node);
5800 free_extent_buffer(log->commit_root);
5802 btrfs_handle_fs_error(fs_info, ret,
5803 "Couldn't read target root for tree log recovery.");
5807 wc.replay_dest->log_root = log;
5808 btrfs_record_root_in_trans(trans, wc.replay_dest);
5809 ret = walk_log_tree(trans, log, &wc);
5811 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5812 ret = fixup_inode_link_counts(trans, wc.replay_dest,
5816 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
5817 struct btrfs_root *root = wc.replay_dest;
5819 btrfs_release_path(path);
5822 * We have just replayed everything, and the highest
5823 * objectid of fs roots probably has changed in case
5824 * some inode_items got replayed.
5826 * root->objectid_mutex is not acquired as log replay
5827 * could only happen during mount.
5829 ret = btrfs_find_highest_objectid(root,
5830 &root->highest_objectid);
5833 key.offset = found_key.offset - 1;
5834 wc.replay_dest->log_root = NULL;
5835 free_extent_buffer(log->node);
5836 free_extent_buffer(log->commit_root);
5842 if (found_key.offset == 0)
5845 btrfs_release_path(path);
5847 /* step one is to pin it all, step two is to replay just inodes */
5850 wc.process_func = replay_one_buffer;
5851 wc.stage = LOG_WALK_REPLAY_INODES;
5854 /* step three is to replay everything */
5855 if (wc.stage < LOG_WALK_REPLAY_ALL) {
5860 btrfs_free_path(path);
5862 /* step 4: commit the transaction, which also unpins the blocks */
5863 ret = btrfs_commit_transaction(trans);
5867 free_extent_buffer(log_root_tree->node);
5868 log_root_tree->log_root = NULL;
5869 clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
5870 kfree(log_root_tree);
5875 btrfs_end_transaction(wc.trans);
5876 btrfs_free_path(path);
5881 * there are some corner cases where we want to force a full
5882 * commit instead of allowing a directory to be logged.
5884 * They revolve around files that were unlinked from the directory, and
5885 * this function updates the parent directory so that a full commit is
5886 * properly done if it is fsync'd later after the unlinks are done.
5888 * Must be called before the unlink operations (updates to the subvolume tree,
5889 * inodes, etc) are done.
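/*
 * The simplest sequence this guards against, as a userspace sketch
 * (illustrative only; paths are made up, error handling omitted):
 *
 *   unlink("/mnt/dir/file");      // file's inode is not in the log
 *   int dirfd = open("/mnt/dir", O_RDONLY | O_DIRECTORY);
 *   fsync(dirfd);                 // must not miss the unlink
 *
 * Because last_unlink_trans is bumped below before the unlink touches
 * the subvolume tree, the later fsync of the directory can detect it
 * (see btrfs_must_commit_transaction() above) and fall back to a full
 * transaction commit instead of relying on the log alone.
 */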
5891 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
5892 struct btrfs_inode *dir, struct btrfs_inode *inode,
5896 * when we're logging a file, if it hasn't been renamed
5897 * or unlinked, and its inode is fully committed on disk,
5898 * we don't have to worry about walking up the directory chain
5899 * to log its parents.
5901 * So, we use the last_unlink_trans field to put this transid
5902 * into the file. When the file is logged we check it and
5903 * don't log the parents if the file is fully on disk.
5905 mutex_lock(&inode->log_mutex);
5906 inode->last_unlink_trans = trans->transid;
5907 mutex_unlock(&inode->log_mutex);
5910 * if this directory was already logged any new
5911 * names for this file/dir will get recorded
5914 if (dir->logged_trans == trans->transid)
5918 * if the inode we're about to unlink was logged,
5919 * the log will be properly updated for any new names
5921 if (inode->logged_trans == trans->transid)
5925 * when renaming files across directories, if the directory
5926 * we're unlinking from gets fsync'd later on, there's
5927 * no way to find the destination directory later and fsync it
5928 * properly. So, we have to be conservative and force commits
5929 * so the new name gets discovered.
5934 /* we can safely do the unlink without any special recording */
5938 mutex_lock(&dir->log_mutex);
5939 dir->last_unlink_trans = trans->transid;
5940 mutex_unlock(&dir->log_mutex);
5944 * Make sure that if someone attempts to fsync the parent directory of a deleted
5945 * snapshot, it ends up triggering a transaction commit. This is to guarantee
5946 * that after replaying the log tree of the parent directory's root we will not
5947 * see the snapshot anymore and at log replay time we will not see any log tree
5948 * corresponding to the deleted snapshot's root, which could lead to replaying
5949 * it after replaying the log tree of the parent directory (which would replay
5950 * the snapshot delete operation).
5952 * Must be called before the actual snapshot destroy operation (updates to the
5953 * parent root and tree of tree roots trees, etc) are done.
5955 void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
5956 struct btrfs_inode *dir)
5958 mutex_lock(&dir->log_mutex);
5959 dir->last_unlink_trans = trans->transid;
5960 mutex_unlock(&dir->log_mutex);
5964 * Call this after adding a new name for a file and it will properly
5965 * update the log to reflect the new name.
5967 * It will return zero if all goes well, and it will return 1 if a
5968 * full transaction commit is required.
5970 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
5971 struct btrfs_inode *inode, struct btrfs_inode *old_dir,
5972 struct dentry *parent)
5974 struct btrfs_fs_info *fs_info = trans->fs_info;
5977 * this will force the logging code to walk the dentry chain up to the parent.
5980 if (!S_ISDIR(inode->vfs_inode.i_mode))
5981 inode->last_unlink_trans = trans->transid;
5984 * if this inode hasn't been logged and the directory we're renaming it
5985 * from hasn't been logged, we don't need to log it
5987 if (inode->logged_trans <= fs_info->last_trans_committed &&
5988 (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
5991 return btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
5992 LOG_INODE_EXISTS, NULL);