2 * Copyright (C) 2008 Oracle. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/blkdev.h>
22 #include <linux/list_sort.h>
26 #include "print-tree.h"
30 /* magic values for the inode_only field in btrfs_log_inode:
32 * LOG_INODE_ALL means to log everything
33 * LOG_INODE_EXISTS means to log just enough to recreate the inode
36 #define LOG_INODE_ALL 0
37 #define LOG_INODE_EXISTS 1
40 * directory trouble cases
42 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
43 * log, we must force a full commit before doing an fsync of the directory
44 * where the unlink was done.
45 * ---> record transid of last unlink/rename per directory
49 * rename foo/some_dir foo2/some_dir
51 * fsync foo/some_dir/some_file
53 * The fsync above will unlink the original some_dir without recording
54 * it in its new location (foo2). After a crash, some_dir will be gone
55 * unless the fsync of some_file forces a full commit
57 * 2) we must log any new names for any file or dir that is in the fsync
58 * log. ---> check inode while renaming/linking.
60 * 2a) we must log any new names for any file or dir during rename
61 * when the directory they are being removed from was logged.
62 * ---> check inode and old parent dir during rename
64 * 2a is actually the more important variant. Without the extra logging
65 * a crash might unlink the old name without recreating the new one
67 * 3) after a crash, we must go through any directories with a link count
68 * of zero and redo the rm -rf
75 * The directory f1 was fully removed from the FS, but fsync was never
76 * called on f1, only its parent dir. After a crash the rm -rf must
77 * be replayed. This must be able to recurse down the entire
78 * directory tree. The inode link count fixup code takes care of the
83 * stages for the tree walking. The first
84 * stage (0) is to only pin down the blocks we find,
85 * the second stage (1) is to make sure that all the inodes
86 * we find in the log are created in the subvolume.
88 * The next stage (2) replays directory index items, and the last stage
89 * deals with directories, links, extents and all the other fun semantics
91 #define LOG_WALK_PIN_ONLY 0
92 #define LOG_WALK_REPLAY_INODES 1
93 #define LOG_WALK_REPLAY_DIR_INDEX 2
94 #define LOG_WALK_REPLAY_ALL 3
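/*
 * Rough usage sketch (not a definitive contract): log replay starts with a
 * walk_control that has process_func = process_one_buffer and pin set, then
 * switches process_func to replay_one_buffer and steps stage from
 * LOG_WALK_REPLAY_INODES up to LOG_WALK_REPLAY_ALL; freeing a log tree at
 * transaction commit time instead sets free = 1 in the walk_control.
 */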
96 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
97 struct btrfs_root *root, struct inode *inode,
101 struct btrfs_log_ctx *ctx);
102 static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root,
104 struct btrfs_path *path, u64 objectid);
105 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
106 struct btrfs_root *root,
107 struct btrfs_root *log,
108 struct btrfs_path *path,
109 u64 dirid, int del_all);
112 * tree logging is a special write ahead log used to make sure that
113 * fsyncs and O_SYNCs can happen without doing full tree commits.
115 * Full tree commits are expensive because they require commonly
116 * modified blocks to be recowed, creating many dirty pages in the
117 * extent tree and a 4x-6x higher write load than ext3.
119 * Instead of doing a tree commit on every fsync, we use the
120 * key ranges and transaction ids to find items for a given file or directory
121 * that have changed in this transaction. Those items are copied into
122 * a special tree (one per subvolume root), that tree is written to disk
123 * and then the fsync is considered complete.
125 * After a crash, items are copied out of the log-tree back into the
126 * subvolume tree. Any file data extents found are recorded in the extent
127 * allocation tree, and the log-tree freed.
129 * The log tree is read three times: once to pin down all the extents it is
130 * using in ram, once to create all the inodes logged in the tree
131 * and once to do all the other items.
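 *
 * Rough fsync flow (a sketch, not the exact call chain): the fsync code
 * calls start_log_trans() to join or create the per-subvolume log,
 * btrfs_log_inode() to copy the changed items into it, btrfs_end_log_trans()
 * to drop the writer count, and finally btrfs_sync_log() to write the log
 * blocks and the log root tree to disk.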
135 * start a sub transaction and setup the log tree
136 * this increments the log tree writer count to make the people
137 * syncing the tree wait for us to finish
139 static int start_log_trans(struct btrfs_trans_handle *trans,
140 struct btrfs_root *root,
141 struct btrfs_log_ctx *ctx)
146 mutex_lock(&root->log_mutex);
147 if (root->log_root) {
148 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
152 if (!root->log_start_pid) {
153 root->log_start_pid = current->pid;
154 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
155 } else if (root->log_start_pid != current->pid) {
156 set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
159 atomic_inc(&root->log_batch);
160 atomic_inc(&root->log_writers);
162 index = root->log_transid % 2;
163 list_add_tail(&ctx->list, &root->log_ctxs[index]);
164 ctx->log_transid = root->log_transid;
166 mutex_unlock(&root->log_mutex);
171 mutex_lock(&root->fs_info->tree_log_mutex);
172 if (!root->fs_info->log_root_tree)
173 ret = btrfs_init_log_root_tree(trans, root->fs_info);
174 mutex_unlock(&root->fs_info->tree_log_mutex);
178 if (!root->log_root) {
179 ret = btrfs_add_log_tree(trans, root);
183 clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
184 root->log_start_pid = current->pid;
185 atomic_inc(&root->log_batch);
186 atomic_inc(&root->log_writers);
188 index = root->log_transid % 2;
189 list_add_tail(&ctx->list, &root->log_ctxs[index]);
190 ctx->log_transid = root->log_transid;
193 mutex_unlock(&root->log_mutex);
198 * returns 0 if there was a log transaction running and we were able
199 * to join, or returns -ENOENT if there was no transaction running
202 static int join_running_log_trans(struct btrfs_root *root)
210 mutex_lock(&root->log_mutex);
211 if (root->log_root) {
213 atomic_inc(&root->log_writers);
215 mutex_unlock(&root->log_mutex);
220 * This either makes the current running log transaction wait
221 * until you call btrfs_end_log_trans() or it makes any future
222 * log transactions wait until you call btrfs_end_log_trans()
224 int btrfs_pin_log_trans(struct btrfs_root *root)
228 mutex_lock(&root->log_mutex);
229 atomic_inc(&root->log_writers);
230 mutex_unlock(&root->log_mutex);
235 * indicate we're done making changes to the log tree
236 * and wake up anyone waiting to do a sync
238 void btrfs_end_log_trans(struct btrfs_root *root)
240 if (atomic_dec_and_test(&root->log_writers)) {
242 if (waitqueue_active(&root->log_writer_wait))
243 wake_up(&root->log_writer_wait);
249 * the walk control struct is used to pass state down the chain when
250 * processing the log tree. The stage field tells us which part
251 * of the log tree processing we are currently doing. The others
252 * are state fields used for that specific part
254 struct walk_control {
255 /* should we free the extent on disk when done? This is used
256 * at transaction commit time while freeing a log tree
260 /* should we write out the extent buffer? This is used
261 * while flushing the log tree to disk during a sync
265 /* should we wait for the extent buffer io to finish? Also used
266 * while flushing the log tree to disk for a sync
270 /* pin only walk, we record which extents on disk belong to the
275 /* what stage of the replay code we're currently in */
278 /* the root we are currently replaying */
279 struct btrfs_root *replay_dest;
281 /* the trans handle for the current replay */
282 struct btrfs_trans_handle *trans;
284 /* the function that gets used to process blocks we find in the
285 * tree. Note the extent_buffer might not be up to date when it is
286 * passed in, and it must be checked or read if you need the data
289 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
290 struct walk_control *wc, u64 gen);
294 * process_func used to pin down extents, write them or wait on them
296 static int process_one_buffer(struct btrfs_root *log,
297 struct extent_buffer *eb,
298 struct walk_control *wc, u64 gen)
303 * If this fs is mixed then we need to be able to process the leaves to
304 * pin down any logged extents, so we have to read the block.
306 if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) {
307 ret = btrfs_read_buffer(eb, gen);
313 ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root,
316 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
317 if (wc->pin && btrfs_header_level(eb) == 0)
318 ret = btrfs_exclude_logged_extents(log, eb);
320 btrfs_write_tree_block(eb);
322 btrfs_wait_tree_block_writeback(eb);
328 * Item overwrite used by replay and tree logging. eb, slot and key all refer
329 * to the src data we are copying out.
331 * root is the tree we are copying into, and path is a scratch
332 * path for use in this function (it should be released on entry and
333 * will be released on exit).
335 * If the key is already in the destination tree the existing item is
336 * overwritten. If the existing item isn't big enough, it is extended.
337 * If it is too large, it is truncated.
339 * If the key isn't in the destination yet, a new item is inserted.
341 static noinline int overwrite_item(struct btrfs_trans_handle *trans,
342 struct btrfs_root *root,
343 struct btrfs_path *path,
344 struct extent_buffer *eb, int slot,
345 struct btrfs_key *key)
349 u64 saved_i_size = 0;
350 int save_old_i_size = 0;
351 unsigned long src_ptr;
352 unsigned long dst_ptr;
353 int overwrite_root = 0;
354 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;
356 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
359 item_size = btrfs_item_size_nr(eb, slot);
360 src_ptr = btrfs_item_ptr_offset(eb, slot);
362 /* look for the key in the destination tree */
363 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
370 u32 dst_size = btrfs_item_size_nr(path->nodes[0],
372 if (dst_size != item_size)
375 if (item_size == 0) {
376 btrfs_release_path(path);
379 dst_copy = kmalloc(item_size, GFP_NOFS);
380 src_copy = kmalloc(item_size, GFP_NOFS);
381 if (!dst_copy || !src_copy) {
382 btrfs_release_path(path);
388 read_extent_buffer(eb, src_copy, src_ptr, item_size);
390 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
391 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
393 ret = memcmp(dst_copy, src_copy, item_size);
398 * they have the same contents, just return, this saves
399 * us from cowing blocks in the destination tree and doing
400 * extra writes that may not have been done by a previous
404 btrfs_release_path(path);
409 * We need to load the old nbytes into the inode so when we
410 * replay the extents we've logged we get the right nbytes.
413 struct btrfs_inode_item *item;
417 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
418 struct btrfs_inode_item);
419 nbytes = btrfs_inode_nbytes(path->nodes[0], item);
420 item = btrfs_item_ptr(eb, slot,
421 struct btrfs_inode_item);
422 btrfs_set_inode_nbytes(eb, item, nbytes);
425 * If this is a directory we need to reset the i_size to
426 * 0 so that we can set it up properly when replaying
427 * the rest of the items in this log.
429 mode = btrfs_inode_mode(eb, item);
431 btrfs_set_inode_size(eb, item, 0);
433 } else if (inode_item) {
434 struct btrfs_inode_item *item;
438 * New inode, set nbytes to 0 so that the nbytes comes out
439 * properly when we replay the extents.
441 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
442 btrfs_set_inode_nbytes(eb, item, 0);
445 * If this is a directory we need to reset the i_size to 0 so
446 * that we can set it up properly when replaying the rest of
447 * the items in this log.
449 mode = btrfs_inode_mode(eb, item);
451 btrfs_set_inode_size(eb, item, 0);
454 btrfs_release_path(path);
455 /* try to insert the key into the destination tree */
456 path->skip_release_on_error = 1;
457 ret = btrfs_insert_empty_item(trans, root, path,
459 path->skip_release_on_error = 0;
461 /* make sure any existing item is the correct size */
462 if (ret == -EEXIST || ret == -EOVERFLOW) {
464 found_size = btrfs_item_size_nr(path->nodes[0],
466 if (found_size > item_size)
467 btrfs_truncate_item(root, path, item_size, 1);
468 else if (found_size < item_size)
469 btrfs_extend_item(root, path,
470 item_size - found_size);
474 dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
477 /* don't overwrite an existing inode if the generation number
478 * was logged as zero. This is done when the tree logging code
479 * is just logging an inode to make sure it exists after recovery.
481 * Also, don't overwrite i_size on directories during replay.
482 * log replay inserts and removes directory items based on the
483 * state of the tree found in the subvolume, and i_size is modified
486 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
487 struct btrfs_inode_item *src_item;
488 struct btrfs_inode_item *dst_item;
490 src_item = (struct btrfs_inode_item *)src_ptr;
491 dst_item = (struct btrfs_inode_item *)dst_ptr;
493 if (btrfs_inode_generation(eb, src_item) == 0) {
494 struct extent_buffer *dst_eb = path->nodes[0];
496 if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
497 S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) {
498 struct btrfs_map_token token;
499 u64 ino_size = btrfs_inode_size(eb, src_item);
501 btrfs_init_map_token(&token);
502 btrfs_set_token_inode_size(dst_eb, dst_item,
508 if (overwrite_root &&
509 S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
510 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
512 saved_i_size = btrfs_inode_size(path->nodes[0],
517 copy_extent_buffer(path->nodes[0], eb, dst_ptr,
520 if (save_old_i_size) {
521 struct btrfs_inode_item *dst_item;
522 dst_item = (struct btrfs_inode_item *)dst_ptr;
523 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
526 /* make sure the generation is filled in */
527 if (key->type == BTRFS_INODE_ITEM_KEY) {
528 struct btrfs_inode_item *dst_item;
529 dst_item = (struct btrfs_inode_item *)dst_ptr;
530 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
531 btrfs_set_inode_generation(path->nodes[0], dst_item,
536 btrfs_mark_buffer_dirty(path->nodes[0]);
537 btrfs_release_path(path);
542 * simple helper to read an inode off the disk from a given root
543 * This can only be called for subvolume roots and not for the log
545 static noinline struct inode *read_one_inode(struct btrfs_root *root,
548 struct btrfs_key key;
551 key.objectid = objectid;
552 key.type = BTRFS_INODE_ITEM_KEY;
554 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
557 } else if (is_bad_inode(inode)) {
564 /* replays a single extent in 'eb' at 'slot' with 'key' into the
565 * subvolume 'root'. path is released on entry and should be released
568 * extents in the log tree have not been allocated out of the extent
569 * tree yet. So, this completes the allocation, taking a reference
570 * as required if the extent already exists or creating a new extent
571 * if it isn't in the extent allocation tree yet.
573 * The extent is inserted into the file, dropping any existing extents
574 * from the file that overlap the new one.
576 static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
577 struct btrfs_root *root,
578 struct btrfs_path *path,
579 struct extent_buffer *eb, int slot,
580 struct btrfs_key *key)
584 u64 start = key->offset;
586 struct btrfs_file_extent_item *item;
587 struct inode *inode = NULL;
591 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
592 found_type = btrfs_file_extent_type(eb, item);
594 if (found_type == BTRFS_FILE_EXTENT_REG ||
595 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
596 nbytes = btrfs_file_extent_num_bytes(eb, item);
597 extent_end = start + nbytes;
600 * We don't add to the inode's nbytes if we are prealloc or a
603 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
605 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
606 size = btrfs_file_extent_inline_len(eb, slot, item);
607 nbytes = btrfs_file_extent_ram_bytes(eb, item);
608 extent_end = ALIGN(start + size, root->sectorsize);
614 inode = read_one_inode(root, key->objectid);
621 * first check to see if we already have this extent in the
622 * file. This must be done before the btrfs_drop_extents run
623 * so we don't try to drop this extent.
625 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode),
629 (found_type == BTRFS_FILE_EXTENT_REG ||
630 found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
631 struct btrfs_file_extent_item cmp1;
632 struct btrfs_file_extent_item cmp2;
633 struct btrfs_file_extent_item *existing;
634 struct extent_buffer *leaf;
636 leaf = path->nodes[0];
637 existing = btrfs_item_ptr(leaf, path->slots[0],
638 struct btrfs_file_extent_item);
640 read_extent_buffer(eb, &cmp1, (unsigned long)item,
642 read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
646 * we already have a pointer to this exact extent,
647 * we don't have to do anything
649 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
650 btrfs_release_path(path);
654 btrfs_release_path(path);
656 /* drop any overlapping extents */
657 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1);
661 if (found_type == BTRFS_FILE_EXTENT_REG ||
662 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
664 unsigned long dest_offset;
665 struct btrfs_key ins;
667 ret = btrfs_insert_empty_item(trans, root, path, key,
671 dest_offset = btrfs_item_ptr_offset(path->nodes[0],
673 copy_extent_buffer(path->nodes[0], eb, dest_offset,
674 (unsigned long)item, sizeof(*item));
676 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
677 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
678 ins.type = BTRFS_EXTENT_ITEM_KEY;
679 offset = key->offset - btrfs_file_extent_offset(eb, item);
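/*
 * offset is the file position the extent would start at if it had not
 * been clipped (key->offset minus the offset into the on-disk extent);
 * it is what the data back references below record as the owner offset.
 */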
681 if (ins.objectid > 0) {
684 LIST_HEAD(ordered_sums);
686 * is this extent already allocated in the extent
687 * allocation tree? If so, just add a reference
689 ret = btrfs_lookup_data_extent(root, ins.objectid,
692 ret = btrfs_inc_extent_ref(trans, root,
693 ins.objectid, ins.offset,
694 0, root->root_key.objectid,
695 key->objectid, offset, 0);
700 * insert the extent pointer in the extent
703 ret = btrfs_alloc_logged_file_extent(trans,
704 root, root->root_key.objectid,
705 key->objectid, offset, &ins);
709 btrfs_release_path(path);
711 if (btrfs_file_extent_compression(eb, item)) {
712 csum_start = ins.objectid;
713 csum_end = csum_start + ins.offset;
715 csum_start = ins.objectid +
716 btrfs_file_extent_offset(eb, item);
717 csum_end = csum_start +
718 btrfs_file_extent_num_bytes(eb, item);
721 ret = btrfs_lookup_csums_range(root->log_root,
722 csum_start, csum_end - 1,
726 while (!list_empty(&ordered_sums)) {
727 struct btrfs_ordered_sum *sums;
728 sums = list_entry(ordered_sums.next,
729 struct btrfs_ordered_sum,
732 ret = btrfs_csum_file_blocks(trans,
733 root->fs_info->csum_root,
735 list_del(&sums->list);
741 btrfs_release_path(path);
743 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
744 /* inline extents are easy, we just overwrite them */
745 ret = overwrite_item(trans, root, path, eb, slot, key);
750 inode_add_bytes(inode, nbytes);
751 ret = btrfs_update_inode(trans, root, inode);
759 * when cleaning up conflicts between the directory names in the
760 * subvolume, directory names in the log and directory names in the
761 * inode back references, we may have to unlink inodes from directories.
763 * This is a helper function to do the unlink of a specific directory
766 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
767 struct btrfs_root *root,
768 struct btrfs_path *path,
770 struct btrfs_dir_item *di)
775 struct extent_buffer *leaf;
776 struct btrfs_key location;
779 leaf = path->nodes[0];
781 btrfs_dir_item_key_to_cpu(leaf, di, &location);
782 name_len = btrfs_dir_name_len(leaf, di);
783 name = kmalloc(name_len, GFP_NOFS);
787 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
788 btrfs_release_path(path);
790 inode = read_one_inode(root, location.objectid);
796 ret = link_to_fixup_dir(trans, root, path, location.objectid);
800 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
804 ret = btrfs_run_delayed_items(trans, root);
812 * helper function to see if a given name and sequence number found
813 * in an inode back reference are already in a directory and correctly
814 * point to this inode
816 static noinline int inode_in_dir(struct btrfs_root *root,
817 struct btrfs_path *path,
818 u64 dirid, u64 objectid, u64 index,
819 const char *name, int name_len)
821 struct btrfs_dir_item *di;
822 struct btrfs_key location;
825 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
826 index, name, name_len, 0);
827 if (di && !IS_ERR(di)) {
828 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
829 if (location.objectid != objectid)
833 btrfs_release_path(path);
835 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
836 if (di && !IS_ERR(di)) {
837 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
838 if (location.objectid != objectid)
844 btrfs_release_path(path);
849 * helper function to check a log tree for a named back reference in
850 * an inode. This is used to decide if a back reference that is
851 * found in the subvolume conflicts with what we find in the log.
853 * inode backreferences may have multiple refs in a single item;
854 * during replay we process one reference at a time, and we don't
855 * want to delete valid links to a file from the subvolume if that
856 * link is also in the log.
858 static noinline int backref_in_log(struct btrfs_root *log,
859 struct btrfs_key *key,
861 const char *name, int namelen)
863 struct btrfs_path *path;
864 struct btrfs_inode_ref *ref;
866 unsigned long ptr_end;
867 unsigned long name_ptr;
873 path = btrfs_alloc_path();
877 ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
881 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
883 if (key->type == BTRFS_INODE_EXTREF_KEY) {
884 if (btrfs_find_name_in_ext_backref(path, ref_objectid,
885 name, namelen, NULL))
891 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
892 ptr_end = ptr + item_size;
893 while (ptr < ptr_end) {
894 ref = (struct btrfs_inode_ref *)ptr;
895 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
896 if (found_name_len == namelen) {
897 name_ptr = (unsigned long)(ref + 1);
898 ret = memcmp_extent_buffer(path->nodes[0], name,
905 ptr = (unsigned long)(ref + 1) + found_name_len;
908 btrfs_free_path(path);
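/*
 * Resolve conflicts before an inode ref is replayed: any old-style back ref,
 * extended ref or directory entry in the subvolume that uses the same name
 * or index but is not present in the log is unlinked here, so the logged
 * name can be inserted cleanly afterwards.
 */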
912 static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
913 struct btrfs_root *root,
914 struct btrfs_path *path,
915 struct btrfs_root *log_root,
916 struct inode *dir, struct inode *inode,
917 struct extent_buffer *eb,
918 u64 inode_objectid, u64 parent_objectid,
919 u64 ref_index, char *name, int namelen,
925 struct extent_buffer *leaf;
926 struct btrfs_dir_item *di;
927 struct btrfs_key search_key;
928 struct btrfs_inode_extref *extref;
931 /* Search old style refs */
932 search_key.objectid = inode_objectid;
933 search_key.type = BTRFS_INODE_REF_KEY;
934 search_key.offset = parent_objectid;
935 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
937 struct btrfs_inode_ref *victim_ref;
939 unsigned long ptr_end;
941 leaf = path->nodes[0];
943 /* are we trying to overwrite a back ref for the root directory?
944 * If so, just jump out, we're done
946 if (search_key.objectid == search_key.offset)
949 /* check all the names in this back reference to see
950 * if they are in the log. If so, we allow them to stay;
951 * otherwise they must be unlinked as a conflict
953 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
954 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
955 while (ptr < ptr_end) {
956 victim_ref = (struct btrfs_inode_ref *)ptr;
957 victim_name_len = btrfs_inode_ref_name_len(leaf,
959 victim_name = kmalloc(victim_name_len, GFP_NOFS);
963 read_extent_buffer(leaf, victim_name,
964 (unsigned long)(victim_ref + 1),
967 if (!backref_in_log(log_root, &search_key,
972 btrfs_release_path(path);
974 ret = btrfs_unlink_inode(trans, root, dir,
980 ret = btrfs_run_delayed_items(trans, root);
988 ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
992 * NOTE: we have searched the root tree and checked the
993 * corresponding ref, so we do not need to check it again.
997 btrfs_release_path(path);
999 /* Same search but for extended refs */
1000 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
1001 inode_objectid, parent_objectid, 0,
1003 if (!IS_ERR_OR_NULL(extref)) {
1007 struct inode *victim_parent;
1009 leaf = path->nodes[0];
1011 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1012 base = btrfs_item_ptr_offset(leaf, path->slots[0]);
1014 while (cur_offset < item_size) {
1015 extref = (struct btrfs_inode_extref *)(base + cur_offset);
1017 victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
1019 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
1022 victim_name = kmalloc(victim_name_len, GFP_NOFS);
1025 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
1028 search_key.objectid = inode_objectid;
1029 search_key.type = BTRFS_INODE_EXTREF_KEY;
1030 search_key.offset = btrfs_extref_hash(parent_objectid,
1034 if (!backref_in_log(log_root, &search_key,
1035 parent_objectid, victim_name,
1038 victim_parent = read_one_inode(root,
1040 if (victim_parent) {
1042 btrfs_release_path(path);
1044 ret = btrfs_unlink_inode(trans, root,
1050 ret = btrfs_run_delayed_items(
1053 iput(victim_parent);
1064 cur_offset += victim_name_len + sizeof(*extref);
1068 btrfs_release_path(path);
1070 /* look for a conflicting sequence number */
1071 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
1072 ref_index, name, namelen, 0);
1073 if (di && !IS_ERR(di)) {
1074 ret = drop_one_dir_item(trans, root, path, dir, di);
1078 btrfs_release_path(path);
1080 /* look for a conflicting name */
1081 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
1083 if (di && !IS_ERR(di)) {
1084 ret = drop_one_dir_item(trans, root, path, dir, di);
1088 btrfs_release_path(path);
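/*
 * Pull the name, directory index and (for extended refs) the parent objectid
 * out of a single ref item into freshly allocated buffers for the caller.
 * extref_get_fields() handles BTRFS_INODE_EXTREF_KEY items and
 * ref_get_fields() handles old-style BTRFS_INODE_REF_KEY items.
 */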
1093 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1094 u32 *namelen, char **name, u64 *index,
1095 u64 *parent_objectid)
1097 struct btrfs_inode_extref *extref;
1099 extref = (struct btrfs_inode_extref *)ref_ptr;
1101 *namelen = btrfs_inode_extref_name_len(eb, extref);
1102 *name = kmalloc(*namelen, GFP_NOFS);
1106 read_extent_buffer(eb, *name, (unsigned long)&extref->name,
1109 *index = btrfs_inode_extref_index(eb, extref);
1110 if (parent_objectid)
1111 *parent_objectid = btrfs_inode_extref_parent(eb, extref);
1116 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
1117 u32 *namelen, char **name, u64 *index)
1119 struct btrfs_inode_ref *ref;
1121 ref = (struct btrfs_inode_ref *)ref_ptr;
1123 *namelen = btrfs_inode_ref_name_len(eb, ref);
1124 *name = kmalloc(*namelen, GFP_NOFS);
1128 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);
1130 *index = btrfs_inode_ref_index(eb, ref);
1136 * replay one inode back reference item found in the log tree.
1137 * eb, slot and key refer to the buffer and key found in the log tree.
1138 * root is the destination we are replaying into, and path is for temp
1139 * use by this function. (it should be released on return).
1141 static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
1142 struct btrfs_root *root,
1143 struct btrfs_root *log,
1144 struct btrfs_path *path,
1145 struct extent_buffer *eb, int slot,
1146 struct btrfs_key *key)
1148 struct inode *dir = NULL;
1149 struct inode *inode = NULL;
1150 unsigned long ref_ptr;
1151 unsigned long ref_end;
1155 int search_done = 0;
1156 int log_ref_ver = 0;
1157 u64 parent_objectid;
1160 int ref_struct_size;
1162 ref_ptr = btrfs_item_ptr_offset(eb, slot);
1163 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
1165 if (key->type == BTRFS_INODE_EXTREF_KEY) {
1166 struct btrfs_inode_extref *r;
1168 ref_struct_size = sizeof(struct btrfs_inode_extref);
1170 r = (struct btrfs_inode_extref *)ref_ptr;
1171 parent_objectid = btrfs_inode_extref_parent(eb, r);
1173 ref_struct_size = sizeof(struct btrfs_inode_ref);
1174 parent_objectid = key->offset;
1176 inode_objectid = key->objectid;
1179 * it is possible that we didn't log all the parent directories
1180 * for a given inode. If we don't find the dir, just don't
1181 * copy the back ref in. The link count fixup code will take
1184 dir = read_one_inode(root, parent_objectid);
1190 inode = read_one_inode(root, inode_objectid);
1196 while (ref_ptr < ref_end) {
1198 ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
1199 &ref_index, &parent_objectid);
1201 * parent object can change from one array
1205 dir = read_one_inode(root, parent_objectid);
1211 ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
1217 /* if we already have a perfect match, we're done */
1218 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode),
1219 ref_index, name, namelen)) {
1221 * look for a conflicting back reference in the
1222 * metadata. If we find one, we have to unlink that name
1223 * of the file before we add our new link. Later on, we
1224 * overwrite any existing back reference, and we don't
1225 * want to create dangling pointers in the directory.
1229 ret = __add_inode_ref(trans, root, path, log,
1233 ref_index, name, namelen,
1242 /* insert our name */
1243 ret = btrfs_add_link(trans, dir, inode, name, namelen,
1248 btrfs_update_inode(trans, root, inode);
1251 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
1260 /* finally write the back reference in the inode */
1261 ret = overwrite_item(trans, root, path, eb, slot, key);
1263 btrfs_release_path(path);
1270 static int insert_orphan_item(struct btrfs_trans_handle *trans,
1271 struct btrfs_root *root, u64 ino)
1275 ret = btrfs_insert_orphan_item(trans, root, ino);
1282 static int count_inode_extrefs(struct btrfs_root *root,
1283 struct inode *inode, struct btrfs_path *path)
1287 unsigned int nlink = 0;
1290 u64 inode_objectid = btrfs_ino(inode);
1293 struct btrfs_inode_extref *extref;
1294 struct extent_buffer *leaf;
1297 ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
1302 leaf = path->nodes[0];
1303 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1304 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
1307 while (cur_offset < item_size) {
1308 extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
1309 name_len = btrfs_inode_extref_name_len(leaf, extref);
1313 cur_offset += name_len + sizeof(*extref);
1317 btrfs_release_path(path);
1319 btrfs_release_path(path);
1321 if (ret < 0 && ret != -ENOENT)
1326 static int count_inode_refs(struct btrfs_root *root,
1327 struct inode *inode, struct btrfs_path *path)
1330 struct btrfs_key key;
1331 unsigned int nlink = 0;
1333 unsigned long ptr_end;
1335 u64 ino = btrfs_ino(inode);
1338 key.type = BTRFS_INODE_REF_KEY;
1339 key.offset = (u64)-1;
1342 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1346 if (path->slots[0] == 0)
1351 btrfs_item_key_to_cpu(path->nodes[0], &key,
1353 if (key.objectid != ino ||
1354 key.type != BTRFS_INODE_REF_KEY)
1356 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
1357 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
1359 while (ptr < ptr_end) {
1360 struct btrfs_inode_ref *ref;
1362 ref = (struct btrfs_inode_ref *)ptr;
1363 name_len = btrfs_inode_ref_name_len(path->nodes[0],
1365 ptr = (unsigned long)(ref + 1) + name_len;
1369 if (key.offset == 0)
1371 if (path->slots[0] > 0) {
1376 btrfs_release_path(path);
1378 btrfs_release_path(path);
1384 * There are a few corners where the link count of the file can't
1385 * be properly maintained during replay. So, instead of adding
1386 * lots of complexity to the log code, we just scan the backrefs
1387 * for any file that has been through replay.
1389 * The scan will update the link count on the inode to reflect the
1390 * number of back refs found. If it goes down to zero, the iput
1391 * will free the inode.
1393 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
1394 struct btrfs_root *root,
1395 struct inode *inode)
1397 struct btrfs_path *path;
1400 u64 ino = btrfs_ino(inode);
1402 path = btrfs_alloc_path();
1406 ret = count_inode_refs(root, inode, path);
1412 ret = count_inode_extrefs(root, inode, path);
1420 if (nlink != inode->i_nlink) {
1421 set_nlink(inode, nlink);
1422 btrfs_update_inode(trans, root, inode);
1424 BTRFS_I(inode)->index_cnt = (u64)-1;
1426 if (inode->i_nlink == 0) {
1427 if (S_ISDIR(inode->i_mode)) {
1428 ret = replay_dir_deletes(trans, root, NULL, path,
1433 ret = insert_orphan_item(trans, root, ino);
1437 btrfs_free_path(path);
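/*
 * Walk every BTRFS_ORPHAN_ITEM_KEY record queued under
 * BTRFS_TREE_LOG_FIXUP_OBJECTID, delete the fixup record and recount the
 * links on the inode it points to.
 */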
1441 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
1442 struct btrfs_root *root,
1443 struct btrfs_path *path)
1446 struct btrfs_key key;
1447 struct inode *inode;
1449 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1450 key.type = BTRFS_ORPHAN_ITEM_KEY;
1451 key.offset = (u64)-1;
1453 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1458 if (path->slots[0] == 0)
1463 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1464 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
1465 key.type != BTRFS_ORPHAN_ITEM_KEY)
1468 ret = btrfs_del_item(trans, root, path);
1472 btrfs_release_path(path);
1473 inode = read_one_inode(root, key.offset);
1477 ret = fixup_inode_link_count(trans, root, inode);
1483 * fixup on a directory may create new entries,
1484 * make sure we always look for the highest possible
1487 key.offset = (u64)-1;
1491 btrfs_release_path(path);
1497 * record a given inode in the fixup dir so we can check its link
1498 * count when replay is done. The link count is incremented here
1499 * so the inode won't go away until we check it
1501 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
1502 struct btrfs_root *root,
1503 struct btrfs_path *path,
1506 struct btrfs_key key;
1508 struct inode *inode;
1510 inode = read_one_inode(root, objectid);
1514 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
1515 key.type = BTRFS_ORPHAN_ITEM_KEY;
1516 key.offset = objectid;
1518 ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
1520 btrfs_release_path(path);
1522 if (!inode->i_nlink)
1523 set_nlink(inode, 1);
1526 ret = btrfs_update_inode(trans, root, inode);
1527 } else if (ret == -EEXIST) {
1530 BUG(); /* Logic Error */
1538 * when replaying the log for a directory, we only insert names
1539 * for inodes that actually exist. This means an fsync on a directory
1540 * does not implicitly fsync all the new files in it
1542 static noinline int insert_one_name(struct btrfs_trans_handle *trans,
1543 struct btrfs_root *root,
1544 struct btrfs_path *path,
1545 u64 dirid, u64 index,
1546 char *name, int name_len, u8 type,
1547 struct btrfs_key *location)
1549 struct inode *inode;
1553 inode = read_one_inode(root, location->objectid);
1557 dir = read_one_inode(root, dirid);
1563 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);
1565 /* FIXME, put inode into FIXUP list */
1573 * Return true if an inode reference exists in the log for the given name,
1574 * inode and parent inode.
1576 static bool name_in_log_ref(struct btrfs_root *log_root,
1577 const char *name, const int name_len,
1578 const u64 dirid, const u64 ino)
1580 struct btrfs_key search_key;
1582 search_key.objectid = ino;
1583 search_key.type = BTRFS_INODE_REF_KEY;
1584 search_key.offset = dirid;
1585 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1588 search_key.type = BTRFS_INODE_EXTREF_KEY;
1589 search_key.offset = btrfs_extref_hash(dirid, name, name_len);
1590 if (backref_in_log(log_root, &search_key, dirid, name, name_len))
1597 * take a single entry in a log directory item and replay it into
1600 * if a conflicting item exists in the subdirectory already,
1601 * the inode it points to is unlinked and put into the link count
1604 * If a name from the log points to a file or directory that does
1605 * not exist in the FS, it is skipped. fsyncs on directories
1606 * do not force down inodes inside that directory, just changes to the
1607 * names or unlinks in a directory.
1609 static noinline int replay_one_name(struct btrfs_trans_handle *trans,
1610 struct btrfs_root *root,
1611 struct btrfs_path *path,
1612 struct extent_buffer *eb,
1613 struct btrfs_dir_item *di,
1614 struct btrfs_key *key)
1618 struct btrfs_dir_item *dst_di;
1619 struct btrfs_key found_key;
1620 struct btrfs_key log_key;
1625 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
1627 dir = read_one_inode(root, key->objectid);
1631 name_len = btrfs_dir_name_len(eb, di);
1632 name = kmalloc(name_len, GFP_NOFS);
1638 log_type = btrfs_dir_type(eb, di);
1639 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1642 btrfs_dir_item_key_to_cpu(eb, di, &log_key);
1643 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
1648 btrfs_release_path(path);
1650 if (key->type == BTRFS_DIR_ITEM_KEY) {
1651 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
1653 } else if (key->type == BTRFS_DIR_INDEX_KEY) {
1654 dst_di = btrfs_lookup_dir_index_item(trans, root, path,
1663 if (IS_ERR_OR_NULL(dst_di)) {
1664 /* we need a sequence number to insert, so we only
1665 * do inserts for the BTRFS_DIR_INDEX_KEY types
1667 if (key->type != BTRFS_DIR_INDEX_KEY)
1672 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
1673 /* the existing item matches the logged item */
1674 if (found_key.objectid == log_key.objectid &&
1675 found_key.type == log_key.type &&
1676 found_key.offset == log_key.offset &&
1677 btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
1678 update_size = false;
1683 * don't drop the conflicting directory entry if the inode
1684 * for the new entry doesn't exist
1689 ret = drop_one_dir_item(trans, root, path, dir, dst_di);
1693 if (key->type == BTRFS_DIR_INDEX_KEY)
1696 btrfs_release_path(path);
1697 if (!ret && update_size) {
1698 btrfs_i_size_write(dir, dir->i_size + name_len * 2);
1699 ret = btrfs_update_inode(trans, root, dir);
1706 if (name_in_log_ref(root->log_root, name, name_len,
1707 key->objectid, log_key.objectid)) {
1708 /* The dentry will be added later. */
1710 update_size = false;
1713 btrfs_release_path(path);
1714 ret = insert_one_name(trans, root, path, key->objectid, key->offset,
1715 name, name_len, log_type, &log_key);
1716 if (ret && ret != -ENOENT && ret != -EEXIST)
1718 update_size = false;
1724 * find all the names in a directory item and reconcile them into
1725 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
1726 * one name in a directory item, but the same code gets used for
1727 * both directory index types
1729 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
1730 struct btrfs_root *root,
1731 struct btrfs_path *path,
1732 struct extent_buffer *eb, int slot,
1733 struct btrfs_key *key)
1736 u32 item_size = btrfs_item_size_nr(eb, slot);
1737 struct btrfs_dir_item *di;
1740 unsigned long ptr_end;
1742 ptr = btrfs_item_ptr_offset(eb, slot);
1743 ptr_end = ptr + item_size;
1744 while (ptr < ptr_end) {
1745 di = (struct btrfs_dir_item *)ptr;
1746 if (verify_dir_item(root, eb, di))
1748 name_len = btrfs_dir_name_len(eb, di);
1749 ret = replay_one_name(trans, root, path, eb, di, key);
1752 ptr = (unsigned long)(di + 1);
1759 * directory replay has two parts. There are the standard directory
1760 * items in the log copied from the subvolume, and range items
1761 * created in the log while the subvolume was logged.
1763 * The range items tell us which parts of the key space the log
1764 * is authoritative for. During replay, if a key in the subvolume
1765 * directory is in a logged range item, but not actually in the log
1766 * that means it was deleted from the directory before the fsync
1767 * and should be removed.
1769 static noinline int find_dir_range(struct btrfs_root *root,
1770 struct btrfs_path *path,
1771 u64 dirid, int key_type,
1772 u64 *start_ret, u64 *end_ret)
1774 struct btrfs_key key;
1776 struct btrfs_dir_log_item *item;
1780 if (*start_ret == (u64)-1)
1783 key.objectid = dirid;
1784 key.type = key_type;
1785 key.offset = *start_ret;
1787 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1791 if (path->slots[0] == 0)
1796 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1798 if (key.type != key_type || key.objectid != dirid) {
1802 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1803 struct btrfs_dir_log_item);
1804 found_end = btrfs_dir_log_end(path->nodes[0], item);
1806 if (*start_ret >= key.offset && *start_ret <= found_end) {
1808 *start_ret = key.offset;
1809 *end_ret = found_end;
1814 /* check the next slot in the tree to see if it is a valid item */
1815 nritems = btrfs_header_nritems(path->nodes[0]);
1816 if (path->slots[0] >= nritems) {
1817 ret = btrfs_next_leaf(root, path);
1824 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
1826 if (key.type != key_type || key.objectid != dirid) {
1830 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1831 struct btrfs_dir_log_item);
1832 found_end = btrfs_dir_log_end(path->nodes[0], item);
1833 *start_ret = key.offset;
1834 *end_ret = found_end;
1837 btrfs_release_path(path);
1842 * this looks for a given directory item in the log. If the directory
1843 * item is not in the log, the item is removed and the inode it points
1846 static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
1847 struct btrfs_root *root,
1848 struct btrfs_root *log,
1849 struct btrfs_path *path,
1850 struct btrfs_path *log_path,
1852 struct btrfs_key *dir_key)
1855 struct extent_buffer *eb;
1858 struct btrfs_dir_item *di;
1859 struct btrfs_dir_item *log_di;
1862 unsigned long ptr_end;
1864 struct inode *inode;
1865 struct btrfs_key location;
1868 eb = path->nodes[0];
1869 slot = path->slots[0];
1870 item_size = btrfs_item_size_nr(eb, slot);
1871 ptr = btrfs_item_ptr_offset(eb, slot);
1872 ptr_end = ptr + item_size;
1873 while (ptr < ptr_end) {
1874 di = (struct btrfs_dir_item *)ptr;
1875 if (verify_dir_item(root, eb, di)) {
1880 name_len = btrfs_dir_name_len(eb, di);
1881 name = kmalloc(name_len, GFP_NOFS);
1886 read_extent_buffer(eb, name, (unsigned long)(di + 1),
1889 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
1890 log_di = btrfs_lookup_dir_item(trans, log, log_path,
1893 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
1894 log_di = btrfs_lookup_dir_index_item(trans, log,
1900 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) {
1901 btrfs_dir_item_key_to_cpu(eb, di, &location);
1902 btrfs_release_path(path);
1903 btrfs_release_path(log_path);
1904 inode = read_one_inode(root, location.objectid);
1910 ret = link_to_fixup_dir(trans, root,
1911 path, location.objectid);
1919 ret = btrfs_unlink_inode(trans, root, dir, inode,
1922 ret = btrfs_run_delayed_items(trans, root);
1928 /* there might still be more names under this key,
1929 * check and repeat if required
1931 ret = btrfs_search_slot(NULL, root, dir_key, path,
1937 } else if (IS_ERR(log_di)) {
1939 return PTR_ERR(log_di);
1941 btrfs_release_path(log_path);
1944 ptr = (unsigned long)(di + 1);
1949 btrfs_release_path(path);
1950 btrfs_release_path(log_path);
1955 * deletion replay happens before we copy any new directory items
1956 * out of the log or out of backreferences from inodes. It
1957 * scans the log to find ranges of keys that the log is authoritative for,
1958 * and then scans the directory to find items in those ranges that are
1959 * not present in the log.
1961 * Anything we don't find in the log is unlinked and removed from the
1964 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
1965 struct btrfs_root *root,
1966 struct btrfs_root *log,
1967 struct btrfs_path *path,
1968 u64 dirid, int del_all)
1972 int key_type = BTRFS_DIR_LOG_ITEM_KEY;
1974 struct btrfs_key dir_key;
1975 struct btrfs_key found_key;
1976 struct btrfs_path *log_path;
1979 dir_key.objectid = dirid;
1980 dir_key.type = BTRFS_DIR_ITEM_KEY;
1981 log_path = btrfs_alloc_path();
1985 dir = read_one_inode(root, dirid);
1986 /* it isn't an error if the inode isn't there; that can happen
1987 * because we replay the deletes before we copy in the inode item
1991 btrfs_free_path(log_path);
1999 range_end = (u64)-1;
2001 ret = find_dir_range(log, path, dirid, key_type,
2002 &range_start, &range_end);
2007 dir_key.offset = range_start;
2010 ret = btrfs_search_slot(NULL, root, &dir_key, path,
2015 nritems = btrfs_header_nritems(path->nodes[0]);
2016 if (path->slots[0] >= nritems) {
2017 ret = btrfs_next_leaf(root, path);
2021 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2023 if (found_key.objectid != dirid ||
2024 found_key.type != dir_key.type)
2027 if (found_key.offset > range_end)
2030 ret = check_item_in_log(trans, root, log, path,
2035 if (found_key.offset == (u64)-1)
2037 dir_key.offset = found_key.offset + 1;
2039 btrfs_release_path(path);
2040 if (range_end == (u64)-1)
2042 range_start = range_end + 1;
2047 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
2048 key_type = BTRFS_DIR_LOG_INDEX_KEY;
2049 dir_key.type = BTRFS_DIR_INDEX_KEY;
2050 btrfs_release_path(path);
2054 btrfs_release_path(path);
2055 btrfs_free_path(log_path);
2061 * the process_func used to replay items from the log tree. This
2062 * gets called in two different stages. The first stage just looks
2063 * for inodes and makes sure they are all copied into the subvolume.
2065 * The second stage copies all the other item types from the log into
2066 * the subvolume. The two stage approach is slower, but gets rid of
2067 * lots of complexity around inodes referencing other inodes that exist
2068 * only in the log (references come from either directory items or inode
2071 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
2072 struct walk_control *wc, u64 gen)
2075 struct btrfs_path *path;
2076 struct btrfs_root *root = wc->replay_dest;
2077 struct btrfs_key key;
2082 ret = btrfs_read_buffer(eb, gen);
2086 level = btrfs_header_level(eb);
2091 path = btrfs_alloc_path();
2095 nritems = btrfs_header_nritems(eb);
2096 for (i = 0; i < nritems; i++) {
2097 btrfs_item_key_to_cpu(eb, &key, i);
2099 /* inode keys are done during the first stage */
2100 if (key.type == BTRFS_INODE_ITEM_KEY &&
2101 wc->stage == LOG_WALK_REPLAY_INODES) {
2102 struct btrfs_inode_item *inode_item;
2105 inode_item = btrfs_item_ptr(eb, i,
2106 struct btrfs_inode_item);
2107 mode = btrfs_inode_mode(eb, inode_item);
2108 if (S_ISDIR(mode)) {
2109 ret = replay_dir_deletes(wc->trans,
2110 root, log, path, key.objectid, 0);
2114 ret = overwrite_item(wc->trans, root, path,
2119 /* for regular files, make sure a corresponding
2120 * orphan item exists. Extents past the new EOF
2121 * will be truncated later by orphan cleanup.
2123 if (S_ISREG(mode)) {
2124 ret = insert_orphan_item(wc->trans, root,
2130 ret = link_to_fixup_dir(wc->trans, root,
2131 path, key.objectid);
2136 if (key.type == BTRFS_DIR_INDEX_KEY &&
2137 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) {
2138 ret = replay_one_dir_item(wc->trans, root, path,
2144 if (wc->stage < LOG_WALK_REPLAY_ALL)
2147 /* these keys are simply copied */
2148 if (key.type == BTRFS_XATTR_ITEM_KEY) {
2149 ret = overwrite_item(wc->trans, root, path,
2153 } else if (key.type == BTRFS_INODE_REF_KEY ||
2154 key.type == BTRFS_INODE_EXTREF_KEY) {
2155 ret = add_inode_ref(wc->trans, root, log, path,
2157 if (ret && ret != -ENOENT)
2160 } else if (key.type == BTRFS_EXTENT_DATA_KEY) {
2161 ret = replay_one_extent(wc->trans, root, path,
2165 } else if (key.type == BTRFS_DIR_ITEM_KEY) {
2166 ret = replay_one_dir_item(wc->trans, root, path,
2172 btrfs_free_path(path);
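/*
 * Descend one level at a time through the log tree, calling wc->process_func
 * on every block reached; when wc->free is set each block is also cleaned
 * and its reserved extent freed and pinned once it has been processed.
 */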
2176 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
2177 struct btrfs_root *root,
2178 struct btrfs_path *path, int *level,
2179 struct walk_control *wc)
2184 struct extent_buffer *next;
2185 struct extent_buffer *cur;
2186 struct extent_buffer *parent;
2190 WARN_ON(*level < 0);
2191 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2193 while (*level > 0) {
2194 WARN_ON(*level < 0);
2195 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2196 cur = path->nodes[*level];
2198 WARN_ON(btrfs_header_level(cur) != *level);
2200 if (path->slots[*level] >=
2201 btrfs_header_nritems(cur))
2204 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2205 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2206 blocksize = root->nodesize;
2208 parent = path->nodes[*level];
2209 root_owner = btrfs_header_owner(parent);
2211 next = btrfs_find_create_tree_block(root, bytenr);
2216 ret = wc->process_func(root, next, wc, ptr_gen);
2218 free_extent_buffer(next);
2222 path->slots[*level]++;
2224 ret = btrfs_read_buffer(next, ptr_gen);
2226 free_extent_buffer(next);
2231 btrfs_tree_lock(next);
2232 btrfs_set_lock_blocking(next);
2233 clean_tree_block(trans, root->fs_info,
2235 btrfs_wait_tree_block_writeback(next);
2236 btrfs_tree_unlock(next);
2239 WARN_ON(root_owner !=
2240 BTRFS_TREE_LOG_OBJECTID);
2241 ret = btrfs_free_and_pin_reserved_extent(root,
2244 free_extent_buffer(next);
2248 free_extent_buffer(next);
2251 ret = btrfs_read_buffer(next, ptr_gen);
2253 free_extent_buffer(next);
2257 WARN_ON(*level <= 0);
2258 if (path->nodes[*level-1])
2259 free_extent_buffer(path->nodes[*level-1]);
2260 path->nodes[*level-1] = next;
2261 *level = btrfs_header_level(next);
2262 path->slots[*level] = 0;
2265 WARN_ON(*level < 0);
2266 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2268 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);
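/*
 * Walk back up the log tree once a level is exhausted, processing (and,
 * when wc->free is set, freeing) each node on the way up until a level
 * with unvisited slots is found.
 */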
2274 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
2275 struct btrfs_root *root,
2276 struct btrfs_path *path, int *level,
2277 struct walk_control *wc)
2284 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2285 slot = path->slots[i];
2286 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
2289 WARN_ON(*level == 0);
2292 struct extent_buffer *parent;
2293 if (path->nodes[*level] == root->node)
2294 parent = path->nodes[*level];
2296 parent = path->nodes[*level + 1];
2298 root_owner = btrfs_header_owner(parent);
2299 ret = wc->process_func(root, path->nodes[*level], wc,
2300 btrfs_header_generation(path->nodes[*level]));
2305 struct extent_buffer *next;
2307 next = path->nodes[*level];
2310 btrfs_tree_lock(next);
2311 btrfs_set_lock_blocking(next);
2312 clean_tree_block(trans, root->fs_info,
2314 btrfs_wait_tree_block_writeback(next);
2315 btrfs_tree_unlock(next);
2318 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
2319 ret = btrfs_free_and_pin_reserved_extent(root,
2320 path->nodes[*level]->start,
2321 path->nodes[*level]->len);
2325 free_extent_buffer(path->nodes[*level]);
2326 path->nodes[*level] = NULL;
2334 * drop the reference count on the tree rooted at 'log'. This traverses
2335 * the tree freeing any blocks that have a ref count of zero after being
2338 static int walk_log_tree(struct btrfs_trans_handle *trans,
2339 struct btrfs_root *log, struct walk_control *wc)
2344 struct btrfs_path *path;
2347 path = btrfs_alloc_path();
2351 level = btrfs_header_level(log->node);
2353 path->nodes[level] = log->node;
2354 extent_buffer_get(log->node);
2355 path->slots[level] = 0;
2358 wret = walk_down_log_tree(trans, log, path, &level, wc);
2366 wret = walk_up_log_tree(trans, log, path, &level, wc);
2375 /* was the root node processed? if not, catch it here */
2376 if (path->nodes[orig_level]) {
2377 ret = wc->process_func(log, path->nodes[orig_level], wc,
2378 btrfs_header_generation(path->nodes[orig_level]));
2382 struct extent_buffer *next;
2384 next = path->nodes[orig_level];
2387 btrfs_tree_lock(next);
2388 btrfs_set_lock_blocking(next);
2389 clean_tree_block(trans, log->fs_info, next);
2390 btrfs_wait_tree_block_writeback(next);
2391 btrfs_tree_unlock(next);
2394 WARN_ON(log->root_key.objectid !=
2395 BTRFS_TREE_LOG_OBJECTID);
2396 ret = btrfs_free_and_pin_reserved_extent(log, next->start,
2404 btrfs_free_path(path);
2409 * helper function to update the item for a given subvolume's log root
2410 * in the tree of log roots
2412 static int update_log_root(struct btrfs_trans_handle *trans,
2413 struct btrfs_root *log)
2417 if (log->log_transid == 1) {
2418 /* insert root item on the first sync */
2419 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
2420 &log->root_key, &log->root_item);
2422 ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
2423 &log->root_key, &log->root_item);
2428 static void wait_log_commit(struct btrfs_trans_handle *trans,
2429 struct btrfs_root *root, int transid)
2432 int index = transid % 2;
2435 * we only allow two pending log transactions at a time,
2436 * so we know that if ours is more than 2 older than the
2437 * current transaction, we're done
2440 prepare_to_wait(&root->log_commit_wait[index],
2441 &wait, TASK_UNINTERRUPTIBLE);
2442 mutex_unlock(&root->log_mutex);
2444 if (root->log_transid_committed < transid &&
2445 atomic_read(&root->log_commit[index]))
2448 finish_wait(&root->log_commit_wait[index], &wait);
2449 mutex_lock(&root->log_mutex);
2450 } while (root->log_transid_committed < transid &&
2451 atomic_read(&root->log_commit[index]));
2454 static void wait_for_writer(struct btrfs_trans_handle *trans,
2455 struct btrfs_root *root)
2459 while (atomic_read(&root->log_writers)) {
2460 prepare_to_wait(&root->log_writer_wait,
2461 &wait, TASK_UNINTERRUPTIBLE);
2462 mutex_unlock(&root->log_mutex);
2463 if (atomic_read(&root->log_writers))
2465 finish_wait(&root->log_writer_wait, &wait);
2466 mutex_lock(&root->log_mutex);
2470 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
2471 struct btrfs_log_ctx *ctx)
2476 mutex_lock(&root->log_mutex);
2477 list_del_init(&ctx->list);
2478 mutex_unlock(&root->log_mutex);
2482 * Invoked with the log mutex held, or from a context where no other task
2483 * can access the list.
2485 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
2486 int index, int error)
2488 struct btrfs_log_ctx *ctx;
2491 INIT_LIST_HEAD(&root->log_ctxs[index]);
2495 list_for_each_entry(ctx, &root->log_ctxs[index], list)
2496 ctx->log_ret = error;
2498 INIT_LIST_HEAD(&root->log_ctxs[index]);
2502 * btrfs_sync_log sends a given tree log down to the disk and
2503 * updates the super blocks to record it. When this call is done,
2504 * you know that any inodes previously logged are safely on disk only
2507 * Any other return value means you need to call btrfs_commit_transaction.
2508 * Some of the edge cases for fsyncing directories that have had unlinks
2509 * or renames done in the past mean that sometimes the only safe
2510 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
2511 * that has happened.
2513 int btrfs_sync_log(struct btrfs_trans_handle *trans,
2514 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
2520 struct btrfs_root *log = root->log_root;
2521 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
2522 int log_transid = 0;
2523 struct btrfs_log_ctx root_log_ctx;
2524 struct blk_plug plug;
2526 mutex_lock(&root->log_mutex);
2527 log_transid = ctx->log_transid;
2528 if (root->log_transid_committed >= log_transid) {
2529 mutex_unlock(&root->log_mutex);
2530 return ctx->log_ret;
2533 index1 = log_transid % 2;
2534 if (atomic_read(&root->log_commit[index1])) {
2535 wait_log_commit(trans, root, log_transid);
2536 mutex_unlock(&root->log_mutex);
2537 return ctx->log_ret;
2539 ASSERT(log_transid == root->log_transid);
2540 atomic_set(&root->log_commit[index1], 1);
2542 /* wait for previous tree log sync to complete */
2543 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
2544 wait_log_commit(trans, root, log_transid - 1);
2547 int batch = atomic_read(&root->log_batch);
2548 /* when we're on an ssd, just kick the log commit out */
2549 if (!btrfs_test_opt(root, SSD) &&
2550 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
2551 mutex_unlock(&root->log_mutex);
2552 schedule_timeout_uninterruptible(1);
2553 mutex_lock(&root->log_mutex);
2555 wait_for_writer(trans, root);
2556 if (batch == atomic_read(&root->log_batch))
2560 /* bail out if we need to do a full commit */
2561 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2563 btrfs_free_logged_extents(log, log_transid);
2564 mutex_unlock(&root->log_mutex);
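/*
 * Two log transids can be in flight at once: even transids tag their
 * dirty_log_pages with EXTENT_DIRTY and odd ones with EXTENT_NEW, so each
 * log commit writes and waits on only its own pages.
 */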
2568 if (log_transid % 2 == 0)
2569 mark = EXTENT_DIRTY;
2573 /* we start IO on all the marked extents here, but we don't actually
2574 * wait for them until later.
2576 blk_start_plug(&plug);
2577 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
2579 blk_finish_plug(&plug);
2580 btrfs_abort_transaction(trans, root, ret);
2581 btrfs_free_logged_extents(log, log_transid);
2582 btrfs_set_log_full_commit(root->fs_info, trans);
2583 mutex_unlock(&root->log_mutex);
2587 btrfs_set_root_node(&log->root_item, log->node);
2589 root->log_transid++;
2590 log->log_transid = root->log_transid;
2591 root->log_start_pid = 0;
2593 * IO has been started, blocks of the log tree have the WRITTEN flag set
2594 * in their headers. New modifications of the log will be written to
2595 * new positions. So it's safe to allow log writers to go in.
2597 mutex_unlock(&root->log_mutex);
2599 btrfs_init_log_ctx(&root_log_ctx);
2601 mutex_lock(&log_root_tree->log_mutex);
2602 atomic_inc(&log_root_tree->log_batch);
2603 atomic_inc(&log_root_tree->log_writers);
2605 index2 = log_root_tree->log_transid % 2;
2606 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]);
2607 root_log_ctx.log_transid = log_root_tree->log_transid;
2609 mutex_unlock(&log_root_tree->log_mutex);
2611 ret = update_log_root(trans, log);
2613 mutex_lock(&log_root_tree->log_mutex);
2614 if (atomic_dec_and_test(&log_root_tree->log_writers)) {
2616 if (waitqueue_active(&log_root_tree->log_writer_wait))
2617 wake_up(&log_root_tree->log_writer_wait);
2621 if (!list_empty(&root_log_ctx.list))
2622 list_del_init(&root_log_ctx.list);
2624 blk_finish_plug(&plug);
2625 btrfs_set_log_full_commit(root->fs_info, trans);
2627 if (ret != -ENOSPC) {
2628 btrfs_abort_transaction(trans, root, ret);
2629 mutex_unlock(&log_root_tree->log_mutex);
2632 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2633 btrfs_free_logged_extents(log, log_transid);
2634 mutex_unlock(&log_root_tree->log_mutex);
2639 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) {
2640 blk_finish_plug(&plug);
2641 mutex_unlock(&log_root_tree->log_mutex);
2642 ret = root_log_ctx.log_ret;
2646 index2 = root_log_ctx.log_transid % 2;
2647 if (atomic_read(&log_root_tree->log_commit[index2])) {
2648 blk_finish_plug(&plug);
2649 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
2651 btrfs_wait_logged_extents(trans, log, log_transid);
2652 wait_log_commit(trans, log_root_tree,
2653 root_log_ctx.log_transid);
2654 mutex_unlock(&log_root_tree->log_mutex);
2656 ret = root_log_ctx.log_ret;
2659 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
2660 atomic_set(&log_root_tree->log_commit[index2], 1);
2662 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
2663 wait_log_commit(trans, log_root_tree,
2664 root_log_ctx.log_transid - 1);
2667 wait_for_writer(trans, log_root_tree);
2670 * now that we've moved on to the tree of log tree roots,
2671 * check the full commit flag again
2673 if (btrfs_need_log_full_commit(root->fs_info, trans)) {
2674 blk_finish_plug(&plug);
2675 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2676 btrfs_free_logged_extents(log, log_transid);
2677 mutex_unlock(&log_root_tree->log_mutex);
2679 goto out_wake_log_root;
2682 ret = btrfs_write_marked_extents(log_root_tree,
2683 &log_root_tree->dirty_log_pages,
2684 EXTENT_DIRTY | EXTENT_NEW);
2685 blk_finish_plug(&plug);
2687 btrfs_set_log_full_commit(root->fs_info, trans);
2688 btrfs_abort_transaction(trans, root, ret);
2689 btrfs_free_logged_extents(log, log_transid);
2690 mutex_unlock(&log_root_tree->log_mutex);
2691 goto out_wake_log_root;
2693 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2695 ret = btrfs_wait_marked_extents(log_root_tree,
2696 &log_root_tree->dirty_log_pages,
2697 EXTENT_NEW | EXTENT_DIRTY);
2699 btrfs_set_log_full_commit(root->fs_info, trans);
2700 btrfs_free_logged_extents(log, log_transid);
2701 mutex_unlock(&log_root_tree->log_mutex);
2702 goto out_wake_log_root;
2704 btrfs_wait_logged_extents(trans, log, log_transid);
2706 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2707 log_root_tree->node->start);
2708 btrfs_set_super_log_root_level(root->fs_info->super_for_commit,
2709 btrfs_header_level(log_root_tree->node));
2711 log_root_tree->log_transid++;
2712 mutex_unlock(&log_root_tree->log_mutex);
2715 * nobody else is going to jump in and write the ctree
2716 * super here because the log_commit atomic below is protecting
2717 * us. We must be called with a transaction handle pinning
2718 * the running transaction open, so a full commit can't hop
2719 * in and cause problems either.
2721 ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
2723 btrfs_set_log_full_commit(root->fs_info, trans);
2724 btrfs_abort_transaction(trans, root, ret);
2725 goto out_wake_log_root;
2728 mutex_lock(&root->log_mutex);
2729 if (root->last_log_commit < log_transid)
2730 root->last_log_commit = log_transid;
2731 mutex_unlock(&root->log_mutex);
2735 * We needn't get log_mutex here because we are sure all
2736 * the other tasks are blocked.
2738 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
2740 mutex_lock(&log_root_tree->log_mutex);
2741 log_root_tree->log_transid_committed++;
2742 atomic_set(&log_root_tree->log_commit[index2], 0);
2743 mutex_unlock(&log_root_tree->log_mutex);
2745 if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
2746 wake_up(&log_root_tree->log_commit_wait[index2]);
2749 btrfs_remove_all_log_ctxs(root, index1, ret);
2751 mutex_lock(&root->log_mutex);
2752 root->log_transid_committed++;
2753 atomic_set(&root->log_commit[index1], 0);
2754 mutex_unlock(&root->log_mutex);
2756 if (waitqueue_active(&root->log_commit_wait[index1]))
2757 wake_up(&root->log_commit_wait[index1]);
2761 static void free_log_tree(struct btrfs_trans_handle *trans,
2762 struct btrfs_root *log)
2767 struct walk_control wc = {
2769 .process_func = process_one_buffer
2772 ret = walk_log_tree(trans, log, &wc);
2773 /* I don't think this can happen but just in case */
2775 btrfs_abort_transaction(trans, log, ret);
2778 ret = find_first_extent_bit(&log->dirty_log_pages,
2779 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW,
2784 clear_extent_bits(&log->dirty_log_pages, start, end,
2785 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
2789 * We may have short-circuited the log tree with the full commit logic
2790 * and left ordered extents on our list, so clear these out to keep us
2791 * from leaking inodes and memory.
2793 btrfs_free_logged_extents(log, 0);
2794 btrfs_free_logged_extents(log, 1);
2796 free_extent_buffer(log->node);
2801 * free all the extents used by the tree log. This should be called
2802 * at commit time of the full transaction
2804 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
2806 if (root->log_root) {
2807 free_log_tree(trans, root->log_root);
2808 root->log_root = NULL;
2813 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
2814 struct btrfs_fs_info *fs_info)
2816 if (fs_info->log_root_tree) {
2817 free_log_tree(trans, fs_info->log_root_tree);
2818 fs_info->log_root_tree = NULL;
2824 * If both a file and directory are logged, and unlinks or renames are
2825 * mixed in, we have a few interesting corners:
2827 * create file X in dir Y
2828 * link file X to X.link in dir Y
2830 * unlink file X but leave X.link
2833 * After a crash we would expect only X.link to exist. But file X
2834 * didn't get fsync'd again so the log has back refs for X and X.link.
2836 * We solve this by removing directory entries and inode backrefs from the
2837 * log when a file that was logged in the current transaction is
2838 * unlinked. Any later fsync will include the updated log entries, and
2839 * we'll be able to reconstruct the proper directory items from backrefs.
2841 * This optimization allows us to avoid relogging the entire inode
2842 * or the entire directory.
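/*
 * A standalone userspace sketch of the sequence above (not part of this
 * file; paths are made up and error handling is omitted):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("Y/X", O_CREAT | O_WRONLY, 0644);
 *		int dirfd = open("Y", O_RDONLY | O_DIRECTORY);
 *
 *		link("Y/X", "Y/X.link");
 *		fsync(fd);		// back refs for X and X.link go into the log
 *		unlink("Y/X");		// prunes X's dir entry and backref from the log
 *		fsync(dirfd);		// a later fsync picks up the updated log entries
 *		close(fd);
 *		close(dirfd);
 *		return 0;
 *	}
 *
 * After a crash and log replay only "Y/X.link" should remain.
 */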
2844 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
2845 struct btrfs_root *root,
2846 const char *name, int name_len,
2847 struct inode *dir, u64 index)
2849 struct btrfs_root *log;
2850 struct btrfs_dir_item *di;
2851 struct btrfs_path *path;
2855 u64 dir_ino = btrfs_ino(dir);
2857 if (BTRFS_I(dir)->logged_trans < trans->transid)
2860 ret = join_running_log_trans(root);
2864 mutex_lock(&BTRFS_I(dir)->log_mutex);
2866 log = root->log_root;
2867 path = btrfs_alloc_path();
2873 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
2874 name, name_len, -1);
2880 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2881 bytes_del += name_len;
2887 btrfs_release_path(path);
2888 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
2889 index, name, name_len, -1);
2895 ret = btrfs_delete_one_dir_name(trans, log, path, di);
2896 bytes_del += name_len;
2903 /* update the directory size in the log to reflect the names we have removed */
2907 struct btrfs_key key;
2909 key.objectid = dir_ino;
2911 key.type = BTRFS_INODE_ITEM_KEY;
2912 btrfs_release_path(path);
2914 ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
2920 struct btrfs_inode_item *item;
2923 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2924 struct btrfs_inode_item);
2925 i_size = btrfs_inode_size(path->nodes[0], item);
2926 if (i_size > bytes_del)
2927 i_size -= bytes_del;
2930 btrfs_set_inode_size(path->nodes[0], item, i_size);
2931 btrfs_mark_buffer_dirty(path->nodes[0]);
2934 btrfs_release_path(path);
2937 btrfs_free_path(path);
2939 mutex_unlock(&BTRFS_I(dir)->log_mutex);
2940 if (ret == -ENOSPC) {
2941 btrfs_set_log_full_commit(root->fs_info, trans);
2944 btrfs_abort_transaction(trans, root, ret);
2946 btrfs_end_log_trans(root);
2951 /* see comments for btrfs_del_dir_entries_in_log */
2952 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
2953 struct btrfs_root *root,
2954 const char *name, int name_len,
2955 struct inode *inode, u64 dirid)
2957 struct btrfs_root *log;
2961 if (BTRFS_I(inode)->logged_trans < trans->transid)
2964 ret = join_running_log_trans(root);
2967 log = root->log_root;
2968 mutex_lock(&BTRFS_I(inode)->log_mutex);
2970 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
2972 mutex_unlock(&BTRFS_I(inode)->log_mutex);
2973 if (ret == -ENOSPC) {
2974 btrfs_set_log_full_commit(root->fs_info, trans);
2976 } else if (ret < 0 && ret != -ENOENT)
2977 btrfs_abort_transaction(trans, root, ret);
2978 btrfs_end_log_trans(root);
2984 * creates a range item in the log for 'dirid'. first_offset and
2985 * last_offset tell us which parts of the key space the log should
2986 * be considered authoritative for.
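/*
 * A made-up example of the resulting item: a call with
 * key_type == BTRFS_DIR_INDEX_KEY, dirid == 257, first_offset == 10 and
 * last_offset == 27 inserts an item keyed (257, BTRFS_DIR_LOG_INDEX_KEY, 10)
 * whose btrfs_dir_log_item end field is 27, i.e. the log claims to be
 * authoritative for dir index offsets 10 through 27 of inode 257.
 */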
2988 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
2989 struct btrfs_root *log,
2990 struct btrfs_path *path,
2991 int key_type, u64 dirid,
2992 u64 first_offset, u64 last_offset)
2995 struct btrfs_key key;
2996 struct btrfs_dir_log_item *item;
2998 key.objectid = dirid;
2999 key.offset = first_offset;
3000 if (key_type == BTRFS_DIR_ITEM_KEY)
3001 key.type = BTRFS_DIR_LOG_ITEM_KEY;
3003 key.type = BTRFS_DIR_LOG_INDEX_KEY;
3004 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
3008 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3009 struct btrfs_dir_log_item);
3010 btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
3011 btrfs_mark_buffer_dirty(path->nodes[0]);
3012 btrfs_release_path(path);
3017 * log all the items included in the current transaction for a given
3018 * directory. This also creates the range items in the log tree required
3019 * to replay anything deleted before the fsync
3021 static noinline int log_dir_items(struct btrfs_trans_handle *trans,
3022 struct btrfs_root *root, struct inode *inode,
3023 struct btrfs_path *path,
3024 struct btrfs_path *dst_path, int key_type,
3025 u64 min_offset, u64 *last_offset_ret)
3027 struct btrfs_key min_key;
3028 struct btrfs_root *log = root->log_root;
3029 struct extent_buffer *src;
3034 u64 first_offset = min_offset;
3035 u64 last_offset = (u64)-1;
3036 u64 ino = btrfs_ino(inode);
3038 log = root->log_root;
3040 min_key.objectid = ino;
3041 min_key.type = key_type;
3042 min_key.offset = min_offset;
3044 ret = btrfs_search_forward(root, &min_key, path, trans->transid);
3047 * we didn't find anything from this transaction, see if there
3048 * is anything at all
3050 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) {
3051 min_key.objectid = ino;
3052 min_key.type = key_type;
3053 min_key.offset = (u64)-1;
3054 btrfs_release_path(path);
3055 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3057 btrfs_release_path(path);
3060 ret = btrfs_previous_item(root, path, ino, key_type);
3062 /* if ret == 0 there are items for this type,
3063 * create a range to tell us the last key of this type.
3064 * otherwise, there are no items in this directory after
3065 * *min_offset, and we create a range to indicate that.
3068 struct btrfs_key tmp;
3069 btrfs_item_key_to_cpu(path->nodes[0], &tmp,
3071 if (key_type == tmp.type)
3072 first_offset = max(min_offset, tmp.offset) + 1;
3077 /* go backward to find any previous key */
3078 ret = btrfs_previous_item(root, path, ino, key_type);
3080 struct btrfs_key tmp;
3081 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3082 if (key_type == tmp.type) {
3083 first_offset = tmp.offset;
3084 ret = overwrite_item(trans, log, dst_path,
3085 path->nodes[0], path->slots[0],
3093 btrfs_release_path(path);
3095 /* find the first key from this transaction again */
3096 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3097 if (WARN_ON(ret != 0))
3101 * we have a block from this transaction, log every item in it
3102 * from our directory
3105 struct btrfs_key tmp;
3106 src = path->nodes[0];
3107 nritems = btrfs_header_nritems(src);
3108 for (i = path->slots[0]; i < nritems; i++) {
3109 btrfs_item_key_to_cpu(src, &min_key, i);
3111 if (min_key.objectid != ino || min_key.type != key_type)
3113 ret = overwrite_item(trans, log, dst_path, src, i,
3120 path->slots[0] = nritems;
3123 * look ahead to the next item and see if it is also
3124 * from this directory and from this transaction
3126 ret = btrfs_next_leaf(root, path);
3128 last_offset = (u64)-1;
3131 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
3132 if (tmp.objectid != ino || tmp.type != key_type) {
3133 last_offset = (u64)-1;
3136 if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
3137 ret = overwrite_item(trans, log, dst_path,
3138 path->nodes[0], path->slots[0],
3143 last_offset = tmp.offset;
3148 btrfs_release_path(path);
3149 btrfs_release_path(dst_path);
3152 *last_offset_ret = last_offset;
3154 * insert the log range keys to indicate where the log is valid
3157 ret = insert_dir_log_key(trans, log, path, key_type,
3158 ino, first_offset, last_offset);
3166 * logging directories is very similar to logging inodes. We find all the items
3167 * from the current transaction and write them to the log.
3169 * The recovery code scans the directory in the subvolume, and if it finds a
3170 * key in the range logged that is not present in the log tree, then it means
3171 * that dir entry was unlinked during the transaction.
3173 * In order for that scan to work, we must include one key smaller than
3174 * the smallest key logged by this transaction and one key larger than the largest
3175 * key logged by this transaction.
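/*
 * A made-up example of how those range items are used on replay: say this
 * transaction logged DIR_INDEX items at offsets 11 and 13 and a dir log
 * item covering [10, 14]. If the subvolume directory still contains an
 * index at offset 12, the recovery code sees that 12 falls inside an
 * authoritative range but has no log entry, concludes the entry was
 * unlinked during the transaction, and deletes it.
 */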
3177 static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
3178 struct btrfs_root *root, struct inode *inode,
3179 struct btrfs_path *path,
3180 struct btrfs_path *dst_path)
3185 int key_type = BTRFS_DIR_ITEM_KEY;
3191 ret = log_dir_items(trans, root, inode, path,
3192 dst_path, key_type, min_key,
3196 if (max_key == (u64)-1)
3198 min_key = max_key + 1;
3201 if (key_type == BTRFS_DIR_ITEM_KEY) {
3202 key_type = BTRFS_DIR_INDEX_KEY;
3209 * a helper function to drop items from the log before we relog an
3210 * inode. max_key_type indicates the highest item type to remove.
3211 * This cannot be run for file data extents because it does not
3212 * free the extents they point to.
3214 static int drop_objectid_items(struct btrfs_trans_handle *trans,
3215 struct btrfs_root *log,
3216 struct btrfs_path *path,
3217 u64 objectid, int max_key_type)
3220 struct btrfs_key key;
3221 struct btrfs_key found_key;
3224 key.objectid = objectid;
3225 key.type = max_key_type;
3226 key.offset = (u64)-1;
3229 ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
3230 BUG_ON(ret == 0); /* Logic error */
3234 if (path->slots[0] == 0)
3238 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
3241 if (found_key.objectid != objectid)
3244 found_key.offset = 0;
3246 ret = btrfs_bin_search(path->nodes[0], &found_key, 0,
3249 ret = btrfs_del_items(trans, log, path, start_slot,
3250 path->slots[0] - start_slot + 1);
3252 * If start slot isn't 0 then we don't need to re-search, we've
3253 * found the last guy with the objectid in this tree.
3255 if (ret || start_slot != 0)
3257 btrfs_release_path(path);
3259 btrfs_release_path(path);
3265 static void fill_inode_item(struct btrfs_trans_handle *trans,
3266 struct extent_buffer *leaf,
3267 struct btrfs_inode_item *item,
3268 struct inode *inode, int log_inode_only,
3271 struct btrfs_map_token token;
3273 btrfs_init_map_token(&token);
3275 if (log_inode_only) {
3276 /* set the generation to zero so the recovery code
3277 * can tell the difference between logging
3278 * just to say 'this inode exists' and logging
3279 * to say 'update this inode with these values'
3281 btrfs_set_token_inode_generation(leaf, item, 0, &token);
3282 btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
3284 btrfs_set_token_inode_generation(leaf, item,
3285 BTRFS_I(inode)->generation,
3287 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token);
3290 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
3291 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
3292 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
3293 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
3295 btrfs_set_token_timespec_sec(leaf, &item->atime,
3296 inode->i_atime.tv_sec, &token);
3297 btrfs_set_token_timespec_nsec(leaf, &item->atime,
3298 inode->i_atime.tv_nsec, &token);
3300 btrfs_set_token_timespec_sec(leaf, &item->mtime,
3301 inode->i_mtime.tv_sec, &token);
3302 btrfs_set_token_timespec_nsec(leaf, &item->mtime,
3303 inode->i_mtime.tv_nsec, &token);
3305 btrfs_set_token_timespec_sec(leaf, &item->ctime,
3306 inode->i_ctime.tv_sec, &token);
3307 btrfs_set_token_timespec_nsec(leaf, &item->ctime,
3308 inode->i_ctime.tv_nsec, &token);
3310 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
3313 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
3314 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
3315 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
3316 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
3317 btrfs_set_token_inode_block_group(leaf, item, 0, &token);
3320 static int log_inode_item(struct btrfs_trans_handle *trans,
3321 struct btrfs_root *log, struct btrfs_path *path,
3322 struct inode *inode)
3324 struct btrfs_inode_item *inode_item;
3327 ret = btrfs_insert_empty_item(trans, log, path,
3328 &BTRFS_I(inode)->location,
3329 sizeof(*inode_item));
3330 if (ret && ret != -EEXIST)
3332 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3333 struct btrfs_inode_item);
3334 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
3335 btrfs_release_path(path);
3339 static noinline int copy_items(struct btrfs_trans_handle *trans,
3340 struct inode *inode,
3341 struct btrfs_path *dst_path,
3342 struct btrfs_path *src_path, u64 *last_extent,
3343 int start_slot, int nr, int inode_only,
3346 unsigned long src_offset;
3347 unsigned long dst_offset;
3348 struct btrfs_root *log = BTRFS_I(inode)->root->log_root;
3349 struct btrfs_file_extent_item *extent;
3350 struct btrfs_inode_item *inode_item;
3351 struct extent_buffer *src = src_path->nodes[0];
3352 struct btrfs_key first_key, last_key, key;
3354 struct btrfs_key *ins_keys;
3358 struct list_head ordered_sums;
3359 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3360 bool has_extents = false;
3361 bool need_find_last_extent = true;
3364 INIT_LIST_HEAD(&ordered_sums);
3366 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
3367 nr * sizeof(u32), GFP_NOFS);
3371 first_key.objectid = (u64)-1;
3373 ins_sizes = (u32 *)ins_data;
3374 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
3376 for (i = 0; i < nr; i++) {
3377 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
3378 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
3380 ret = btrfs_insert_empty_items(trans, log, dst_path,
3381 ins_keys, ins_sizes, nr);
3387 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
3388 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
3389 dst_path->slots[0]);
3391 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
3393 if ((i == (nr - 1)))
3394 last_key = ins_keys[i];
3396 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
3397 inode_item = btrfs_item_ptr(dst_path->nodes[0],
3399 struct btrfs_inode_item);
3400 fill_inode_item(trans, dst_path->nodes[0], inode_item,
3401 inode, inode_only == LOG_INODE_EXISTS,
3404 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
3405 src_offset, ins_sizes[i]);
3409 * We set need_find_last_extent here in case we know we were
3410 * processing other items and then walk into the first extent in
3411 * the inode. If we don't hit an extent then nothing changes,
3412 * we'll do the last search the next time around.
3414 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) {
3416 if (first_key.objectid == (u64)-1)
3417 first_key = ins_keys[i];
3419 need_find_last_extent = false;
3422 /* take a reference on file data extents so that truncates
3423 * or deletes of this inode don't have to relog the inode
3426 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
3429 extent = btrfs_item_ptr(src, start_slot + i,
3430 struct btrfs_file_extent_item);
3432 if (btrfs_file_extent_generation(src, extent) < trans->transid)
3435 found_type = btrfs_file_extent_type(src, extent);
3436 if (found_type == BTRFS_FILE_EXTENT_REG) {
3438 ds = btrfs_file_extent_disk_bytenr(src,
3440 /* ds == 0 is a hole */
3444 dl = btrfs_file_extent_disk_num_bytes(src,
3446 cs = btrfs_file_extent_offset(src, extent);
3447 cl = btrfs_file_extent_num_bytes(src,
3449 if (btrfs_file_extent_compression(src,
3455 ret = btrfs_lookup_csums_range(
3456 log->fs_info->csum_root,
3457 ds + cs, ds + cs + cl - 1,
3460 btrfs_release_path(dst_path);
3468 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
3469 btrfs_release_path(dst_path);
3473 * we have to do this after the loop above to avoid changing the
3474 * log tree while trying to change the log tree.
3477 while (!list_empty(&ordered_sums)) {
3478 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3479 struct btrfs_ordered_sum,
3482 ret = btrfs_csum_file_blocks(trans, log, sums);
3483 list_del(&sums->list);
3490 if (need_find_last_extent && *last_extent == first_key.offset) {
3492 * We don't have any leaves between our current one and the one
3493 * we processed before that can have file extent items for our
3494 * inode (and have a generation number smaller than our current transaction).
3497 need_find_last_extent = false;
3501 * Because we use btrfs_search_forward we could skip leaves that were
3502 * not modified and then assume *last_extent is valid when it really
3503 * isn't. So back up to the previous leaf and read the end of the last
3504 * extent before we go and fill in holes.
3506 if (need_find_last_extent) {
3509 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path);
3514 if (src_path->slots[0])
3515 src_path->slots[0]--;
3516 src = src_path->nodes[0];
3517 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]);
3518 if (key.objectid != btrfs_ino(inode) ||
3519 key.type != BTRFS_EXTENT_DATA_KEY)
3521 extent = btrfs_item_ptr(src, src_path->slots[0],
3522 struct btrfs_file_extent_item);
3523 if (btrfs_file_extent_type(src, extent) ==
3524 BTRFS_FILE_EXTENT_INLINE) {
3525 len = btrfs_file_extent_inline_len(src,
3528 *last_extent = ALIGN(key.offset + len,
3531 len = btrfs_file_extent_num_bytes(src, extent);
3532 *last_extent = key.offset + len;
3536 /* So we did prev_leaf, now we need to move to the next leaf, but a few
3537 * things could have happened
3539 * 1) A merge could have happened, so we could currently be on a leaf
3540 * that holds what we were copying in the first place.
3541 * 2) A split could have happened, and now not all of the items we want
3542 * are on the same leaf.
3544 * So we need to adjust how we search for holes, we need to drop the
3545 * path and re-search for the first extent key we found, and then walk
3546 * forward until we hit the last one we copied.
3548 if (need_find_last_extent) {
3549 /* btrfs_prev_leaf could return 1 without releasing the path */
3550 btrfs_release_path(src_path);
3551 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key,
3556 src = src_path->nodes[0];
3557 i = src_path->slots[0];
3563 * Now we need to go through and fill in any holes we may have, to make
3564 * sure that holes are punched for those areas in case they had
3565 * extents previously.
3571 if (i >= btrfs_header_nritems(src_path->nodes[0])) {
3572 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path);
3576 src = src_path->nodes[0];
3580 btrfs_item_key_to_cpu(src, &key, i);
3581 if (!btrfs_comp_cpu_keys(&key, &last_key))
3583 if (key.objectid != btrfs_ino(inode) ||
3584 key.type != BTRFS_EXTENT_DATA_KEY) {
3588 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item);
3589 if (btrfs_file_extent_type(src, extent) ==
3590 BTRFS_FILE_EXTENT_INLINE) {
3591 len = btrfs_file_extent_inline_len(src, i, extent);
3592 extent_end = ALIGN(key.offset + len, log->sectorsize);
3594 len = btrfs_file_extent_num_bytes(src, extent);
3595 extent_end = key.offset + len;
3599 if (*last_extent == key.offset) {
3600 *last_extent = extent_end;
3603 offset = *last_extent;
3604 len = key.offset - *last_extent;
3605 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode),
3606 offset, 0, 0, len, 0, len, 0,
3610 *last_extent = extent_end;
3613 * Need to let the callers know we dropped the path so they should re-search.
3616 if (!ret && need_find_last_extent)
3621 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b)
3623 struct extent_map *em1, *em2;
3625 em1 = list_entry(a, struct extent_map, list);
3626 em2 = list_entry(b, struct extent_map, list);
3628 if (em1->start < em2->start)
3630 else if (em1->start > em2->start)
3635 static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3636 struct inode *inode,
3637 struct btrfs_root *root,
3638 const struct extent_map *em,
3639 const struct list_head *logged_list,
3640 bool *ordered_io_error)
3642 struct btrfs_ordered_extent *ordered;
3643 struct btrfs_root *log = root->log_root;
3644 u64 mod_start = em->mod_start;
3645 u64 mod_len = em->mod_len;
3646 const bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
3649 LIST_HEAD(ordered_sums);
3652 *ordered_io_error = false;
3654 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
3655 em->block_start == EXTENT_MAP_HOLE)
3659 * Wait for any ordered extent that covers our extent map. If it
3660 * finishes without an error, first check and see if our csums are on
3661 * our outstanding ordered extents.
3663 list_for_each_entry(ordered, logged_list, log_list) {
3664 struct btrfs_ordered_sum *sum;
3669 if (ordered->file_offset + ordered->len <= mod_start ||
3670 mod_start + mod_len <= ordered->file_offset)
3673 if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
3674 !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
3675 !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
3676 const u64 start = ordered->file_offset;
3677 const u64 end = ordered->file_offset + ordered->len - 1;
3679 WARN_ON(ordered->inode != inode);
3680 filemap_fdatawrite_range(inode->i_mapping, start, end);
3683 wait_event(ordered->wait,
3684 (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) ||
3685 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
3687 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
3689 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
3690 * i_mapping flags, so that the next fsync won't get
3691 * an outdated io error too.
3693 btrfs_inode_check_errors(inode);
3694 *ordered_io_error = true;
3698 * We are going to copy all the csums on this ordered extent, so
3699 * go ahead and adjust mod_start and mod_len in case this
3700 * ordered extent has already been logged.
3702 if (ordered->file_offset > mod_start) {
3703 if (ordered->file_offset + ordered->len >=
3704 mod_start + mod_len)
3705 mod_len = ordered->file_offset - mod_start;
3707 * If we have this case
3709 * |--------- logged extent ---------|
3710 * |----- ordered extent ----|
3712 * Just don't mess with mod_start and mod_len, we'll
3713 * just end up logging more csums than we need and it
3717 if (ordered->file_offset + ordered->len <
3718 mod_start + mod_len) {
3719 mod_len = (mod_start + mod_len) -
3720 (ordered->file_offset + ordered->len);
3721 mod_start = ordered->file_offset +
3732 * To keep us from looping for the above case of an ordered
3733 * extent that falls inside of the logged extent.
3735 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM,
3739 if (ordered->csum_bytes_left) {
3740 btrfs_start_ordered_extent(inode, ordered, 0);
3741 wait_event(ordered->wait,
3742 ordered->csum_bytes_left == 0);
3745 list_for_each_entry(sum, &ordered->list, list) {
3746 ret = btrfs_csum_file_blocks(trans, log, sum);
3752 if (*ordered_io_error || !mod_len || ret || skip_csum)
3755 if (em->compress_type) {
3757 csum_len = max(em->block_len, em->orig_block_len);
3759 csum_offset = mod_start - em->start;
3763 /* block start is already adjusted for the file extent offset. */
3764 ret = btrfs_lookup_csums_range(log->fs_info->csum_root,
3765 em->block_start + csum_offset,
3766 em->block_start + csum_offset +
3767 csum_len - 1, &ordered_sums, 0);
3771 while (!list_empty(&ordered_sums)) {
3772 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
3773 struct btrfs_ordered_sum,
3776 ret = btrfs_csum_file_blocks(trans, log, sums);
3777 list_del(&sums->list);
3784 static int log_one_extent(struct btrfs_trans_handle *trans,
3785 struct inode *inode, struct btrfs_root *root,
3786 const struct extent_map *em,
3787 struct btrfs_path *path,
3788 const struct list_head *logged_list,
3789 struct btrfs_log_ctx *ctx)
3791 struct btrfs_root *log = root->log_root;
3792 struct btrfs_file_extent_item *fi;
3793 struct extent_buffer *leaf;
3794 struct btrfs_map_token token;
3795 struct btrfs_key key;
3796 u64 extent_offset = em->start - em->orig_start;
3799 int extent_inserted = 0;
3800 bool ordered_io_err = false;
3802 ret = wait_ordered_extents(trans, inode, root, em, logged_list,
3807 if (ordered_io_err) {
3812 btrfs_init_map_token(&token);
3814 ret = __btrfs_drop_extents(trans, log, inode, path, em->start,
3815 em->start + em->len, NULL, 0, 1,
3816 sizeof(*fi), &extent_inserted);
3820 if (!extent_inserted) {
3821 key.objectid = btrfs_ino(inode);
3822 key.type = BTRFS_EXTENT_DATA_KEY;
3823 key.offset = em->start;
3825 ret = btrfs_insert_empty_item(trans, log, path, &key,
3830 leaf = path->nodes[0];
3831 fi = btrfs_item_ptr(leaf, path->slots[0],
3832 struct btrfs_file_extent_item);
3834 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
3836 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3837 btrfs_set_token_file_extent_type(leaf, fi,
3838 BTRFS_FILE_EXTENT_PREALLOC,
3841 btrfs_set_token_file_extent_type(leaf, fi,
3842 BTRFS_FILE_EXTENT_REG,
3845 block_len = max(em->block_len, em->orig_block_len);
3846 if (em->compress_type != BTRFS_COMPRESS_NONE) {
3847 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3850 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3852 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) {
3853 btrfs_set_token_file_extent_disk_bytenr(leaf, fi,
3855 extent_offset, &token);
3856 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len,
3859 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token);
3860 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0,
3864 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token);
3865 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token);
3866 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token);
3867 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type,
3869 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token);
3870 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token);
3871 btrfs_mark_buffer_dirty(leaf);
3873 btrfs_release_path(path);
3878 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
3879 struct btrfs_root *root,
3880 struct inode *inode,
3881 struct btrfs_path *path,
3882 struct list_head *logged_list,
3883 struct btrfs_log_ctx *ctx)
3885 struct extent_map *em, *n;
3886 struct list_head extents;
3887 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
3892 INIT_LIST_HEAD(&extents);
3894 write_lock(&tree->lock);
3895 test_gen = root->fs_info->last_trans_committed;
3897 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
3898 list_del_init(&em->list);
3901 * Just an arbitrary number, this can be really CPU intensive
3902 * once we start getting a lot of extents, and really once we
3903 * have a bunch of extents we just want to commit since it will be faster.
3906 if (++num > 32768) {
3907 list_del_init(&tree->modified_extents);
3912 if (em->generation <= test_gen)
3914 /* Need a ref to keep it from getting evicted from cache */
3915 atomic_inc(&em->refs);
3916 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
3917 list_add_tail(&em->list, &extents);
3921 list_sort(NULL, &extents, extent_cmp);
3924 while (!list_empty(&extents)) {
3925 em = list_entry(extents.next, struct extent_map, list);
3927 list_del_init(&em->list);
3930 * If we had an error we just need to delete everybody from our private list.
3934 clear_em_logging(tree, em);
3935 free_extent_map(em);
3939 write_unlock(&tree->lock);
3941 ret = log_one_extent(trans, inode, root, em, path, logged_list,
3943 write_lock(&tree->lock);
3944 clear_em_logging(tree, em);
3945 free_extent_map(em);
3947 WARN_ON(!list_empty(&extents));
3948 write_unlock(&tree->lock);
3950 btrfs_release_path(path);
3954 static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
3955 struct btrfs_path *path, u64 *size_ret)
3957 struct btrfs_key key;
3960 key.objectid = btrfs_ino(inode);
3961 key.type = BTRFS_INODE_ITEM_KEY;
3964 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
3967 } else if (ret > 0) {
3968 *size_ret = i_size_read(inode);
3970 struct btrfs_inode_item *item;
3972 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3973 struct btrfs_inode_item);
3974 *size_ret = btrfs_inode_size(path->nodes[0], item);
3977 btrfs_release_path(path);
3981 /* log a single inode in the tree log.
3982 * At least one parent directory for this inode must exist in the tree
3983 * or be logged already.
3985 * Any items from this inode changed by the current transaction are copied
3986 * to the log tree. An extra reference is taken on any extents in this
3987 * file, allowing us to avoid a whole pile of corner cases around logging
3988 * blocks that have been removed from the tree.
3990 * See LOG_INODE_ALL and related defines for a description of what inode_only is used for.
3993 * This handles both files and directories.
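/*
 * Illustrative calls (not literal call sites; "dir_inode" is a made-up
 * name) showing the two common inode_only modes:
 *
 *	// fsync of the inode itself: log everything changed this transaction
 *	ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL,
 *			      0, LLONG_MAX, ctx);
 *
 *	// a parent directory only needs enough to be recreated on replay
 *	ret = btrfs_log_inode(trans, root, dir_inode, LOG_INODE_EXISTS,
 *			      0, LLONG_MAX, ctx);
 */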
3995 static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3996 struct btrfs_root *root, struct inode *inode,
4000 struct btrfs_log_ctx *ctx)
4002 struct btrfs_path *path;
4003 struct btrfs_path *dst_path;
4004 struct btrfs_key min_key;
4005 struct btrfs_key max_key;
4006 struct btrfs_root *log = root->log_root;
4007 struct extent_buffer *src = NULL;
4008 LIST_HEAD(logged_list);
4009 u64 last_extent = 0;
4013 int ins_start_slot = 0;
4015 bool fast_search = false;
4016 u64 ino = btrfs_ino(inode);
4017 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4018 u64 logged_isize = 0;
4020 path = btrfs_alloc_path();
4023 dst_path = btrfs_alloc_path();
4025 btrfs_free_path(path);
4029 min_key.objectid = ino;
4030 min_key.type = BTRFS_INODE_ITEM_KEY;
4033 max_key.objectid = ino;
4036 /* today the code can only do partial logging of directories */
4037 if (S_ISDIR(inode->i_mode) ||
4038 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4039 &BTRFS_I(inode)->runtime_flags) &&
4040 inode_only == LOG_INODE_EXISTS))
4041 max_key.type = BTRFS_XATTR_ITEM_KEY;
4043 max_key.type = (u8)-1;
4044 max_key.offset = (u64)-1;
4047 * Only run delayed items if we are a dir or a new file.
4048 * Otherwise commit the delayed inode only, which is needed in
4049 * order for the log replay code to mark inodes for link count
4050 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
4052 if (S_ISDIR(inode->i_mode) ||
4053 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
4054 ret = btrfs_commit_inode_delayed_items(trans, inode);
4056 ret = btrfs_commit_inode_delayed_inode(inode);
4059 btrfs_free_path(path);
4060 btrfs_free_path(dst_path);
4064 mutex_lock(&BTRFS_I(inode)->log_mutex);
4066 btrfs_get_logged_extents(inode, &logged_list, start, end);
4069 * a brute force approach to making sure we get the most uptodate
4070 * copies of everything.
4072 if (S_ISDIR(inode->i_mode)) {
4073 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
4075 if (inode_only == LOG_INODE_EXISTS) {
4076 max_key_type = BTRFS_INODE_EXTREF_KEY;
4077 max_key.type = max_key_type;
4079 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
4081 if (inode_only == LOG_INODE_EXISTS) {
4083 * Make sure the new inode item we write to the log has
4084 * the same isize as the current one (if it exists).
4085 * This is necessary to prevent data loss after log
4086 * replay, and also to prevent doing a wrong expanding
4087 * truncate - for e.g. create file, write 4K into offset
4088 * 0, fsync, write 4K into offset 4096, add hard link,
4089 * fsync some other file (to sync log), power fail - if
4090 * we use the inode's current i_size, after log replay
4091 * we get a 8Kb file, with the last 4Kb extent as a hole
4092 * (zeroes), as if an expanding truncate happened,
4093 * instead of getting a file of 4Kb only.
4095 err = logged_inode_size(log, inode, path,
4100 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4101 &BTRFS_I(inode)->runtime_flags)) {
4102 if (inode_only == LOG_INODE_EXISTS) {
4103 max_key.type = BTRFS_INODE_EXTREF_KEY;
4104 ret = drop_objectid_items(trans, log, path, ino,
4107 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
4108 &BTRFS_I(inode)->runtime_flags);
4109 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4110 &BTRFS_I(inode)->runtime_flags);
4111 ret = btrfs_truncate_inode_items(trans, log,
4114 } else if (test_bit(BTRFS_INODE_COPY_EVERYTHING,
4115 &BTRFS_I(inode)->runtime_flags) ||
4116 inode_only == LOG_INODE_EXISTS) {
4117 if (inode_only == LOG_INODE_ALL) {
4118 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
4119 &BTRFS_I(inode)->runtime_flags);
4121 max_key.type = BTRFS_XATTR_ITEM_KEY;
4123 max_key.type = BTRFS_INODE_EXTREF_KEY;
4125 ret = drop_objectid_items(trans, log, path, ino,
4128 if (inode_only == LOG_INODE_ALL)
4130 ret = log_inode_item(trans, log, dst_path, inode);
4146 ret = btrfs_search_forward(root, &min_key,
4147 path, trans->transid);
4151 /* note, ins_nr might be > 0 here, cleanup outside the loop */
4152 if (min_key.objectid != ino)
4154 if (min_key.type > max_key.type)
4157 src = path->nodes[0];
4158 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
4161 } else if (!ins_nr) {
4162 ins_start_slot = path->slots[0];
4167 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4168 ins_start_slot, ins_nr, inode_only,
4176 btrfs_release_path(path);
4180 ins_start_slot = path->slots[0];
4183 nritems = btrfs_header_nritems(path->nodes[0]);
4185 if (path->slots[0] < nritems) {
4186 btrfs_item_key_to_cpu(path->nodes[0], &min_key,
4191 ret = copy_items(trans, inode, dst_path, path,
4192 &last_extent, ins_start_slot,
4193 ins_nr, inode_only, logged_isize);
4201 btrfs_release_path(path);
4203 if (min_key.offset < (u64)-1) {
4205 } else if (min_key.type < max_key.type) {
4213 ret = copy_items(trans, inode, dst_path, path, &last_extent,
4214 ins_start_slot, ins_nr, inode_only,
4225 btrfs_release_path(path);
4226 btrfs_release_path(dst_path);
4229 * Some ordered extents started by fsync might have completed
4230 * before we collected the ordered extents in logged_list, which
4231 * means they're gone, not in our logged_list nor in the inode's
4232 * ordered tree. We want the application/user space to know an
4233 * error happened while attempting to persist file data so that
4234 * it can take proper action. If such error happened, we leave
4235 * without writing to the log tree and the fsync must report the
4236 * file data write error and not commit the current transaction.
4238 err = btrfs_inode_check_errors(inode);
4243 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4249 } else if (inode_only == LOG_INODE_ALL) {
4250 struct extent_map *em, *n;
4252 write_lock(&em_tree->lock);
4254 * We can't just remove every em if we're called for a ranged
4255 * fsync - that is, one that doesn't cover the whole possible
4256 * file range (0 to LLONG_MAX). This is because we can have
4257 * em's that fall outside the range we're logging and therefore
4258 * their ordered operations haven't completed yet
4259 * (btrfs_finish_ordered_io() not invoked yet). This means we
4260 * didn't get their respective file extent item in the fs/subvol
4261 * tree yet, and need to let the next fast fsync (one which
4262 * consults the list of modified extent maps) find the em so
4263 * that it logs a matching file extent item and waits for the
4264 * respective ordered operation to complete (if it's still
4267 * Removing every em outside the range we're logging would make
4268 * the next fast fsync not log their matching file extent items,
4269 * therefore making us lose data after a log replay.
4271 list_for_each_entry_safe(em, n, &em_tree->modified_extents,
4273 const u64 mod_end = em->mod_start + em->mod_len - 1;
4275 if (em->mod_start >= start && mod_end <= end)
4276 list_del_init(&em->list);
4278 write_unlock(&em_tree->lock);
4281 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
4282 ret = log_directory_changes(trans, root, inode, path, dst_path);
4289 BTRFS_I(inode)->logged_trans = trans->transid;
4290 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans;
4293 btrfs_put_logged_extents(&logged_list);
4295 btrfs_submit_logged_extents(&logged_list, log);
4296 mutex_unlock(&BTRFS_I(inode)->log_mutex);
4298 btrfs_free_path(path);
4299 btrfs_free_path(dst_path);
4304 * follow the dentry parent pointers up the chain and see if any
4305 * of the directories in it require a full commit before they can
4306 * be logged. Returns zero if nothing special needs to be done or 1 if
4307 * a full commit is required.
4309 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
4310 struct inode *inode,
4311 struct dentry *parent,
4312 struct super_block *sb,
4316 struct btrfs_root *root;
4317 struct dentry *old_parent = NULL;
4318 struct inode *orig_inode = inode;
4321 * for a regular file, if its inode is already on disk, we don't
4322 * have to worry about the parents at all. This is because
4323 * we can use the last_unlink_trans field to record renames
4324 * and other fun in this file.
4326 if (S_ISREG(inode->i_mode) &&
4327 BTRFS_I(inode)->generation <= last_committed &&
4328 BTRFS_I(inode)->last_unlink_trans <= last_committed)
4331 if (!S_ISDIR(inode->i_mode)) {
4332 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
4334 inode = parent->d_inode;
4339 * If we are logging a directory then we start with our inode,
4340 * not our parent's inode, so we need to skip setting the
4341 * logged_trans so that further down in the log code we don't
4342 * think this inode has already been logged.
4344 if (inode != orig_inode)
4345 BTRFS_I(inode)->logged_trans = trans->transid;
4348 if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
4349 root = BTRFS_I(inode)->root;
4352 * make sure any commits to the log are forced
4353 * to be full commits
4355 btrfs_set_log_full_commit(root->fs_info, trans);
4360 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
4363 if (IS_ROOT(parent))
4366 parent = dget_parent(parent);
4368 old_parent = parent;
4369 inode = parent->d_inode;
4378 * helper function around btrfs_log_inode to make sure newly created
4379 * parent directories also end up in the log. A minimal inode-and-backref-only
4380 * logging pass is done for any parent directory that is newer than
4381 * the last committed transaction
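/*
 * A made-up illustration of why this matters:
 *
 *	mkdir("A", 0755);
 *	mkdir("A/B", 0755);
 *	fd = open("A/B/file", O_CREAT | O_WRONLY, 0644);
 *	fsync(fd);
 *
 * The fsync of "file" must also log minimal entries for the new
 * directories A and B, otherwise log replay would have nowhere to attach
 * the file's directory entry after a crash.
 */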
4383 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
4384 struct btrfs_root *root, struct inode *inode,
4385 struct dentry *parent,
4389 struct btrfs_log_ctx *ctx)
4391 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
4392 struct super_block *sb;
4393 struct dentry *old_parent = NULL;
4395 u64 last_committed = root->fs_info->last_trans_committed;
4396 const struct dentry * const first_parent = parent;
4397 const bool did_unlink = (BTRFS_I(inode)->last_unlink_trans >
4402 if (btrfs_test_opt(root, NOTREELOG)) {
4408 * The previous transaction commit didn't complete, so we need to do a
4409 * full commit ourselves.
4411 if (root->fs_info->last_trans_log_full_commit >
4412 root->fs_info->last_trans_committed) {
4417 if (root != BTRFS_I(inode)->root ||
4418 btrfs_root_refs(&root->root_item) == 0) {
4423 ret = check_parent_dirs_for_sync(trans, inode, parent,
4424 sb, last_committed);
4428 if (btrfs_inode_in_log(inode, trans->transid)) {
4429 ret = BTRFS_NO_LOG_SYNC;
4433 ret = start_log_trans(trans, root, ctx);
4437 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx);
4442 * for a regular file, if its inode is already on disk, we don't
4443 * have to worry about the parents at all. This is because
4444 * we can use the last_unlink_trans field to record renames
4445 * and other fun in this file.
4447 if (S_ISREG(inode->i_mode) &&
4448 BTRFS_I(inode)->generation <= last_committed &&
4449 BTRFS_I(inode)->last_unlink_trans <= last_committed) {
4455 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
4458 inode = parent->d_inode;
4459 if (root != BTRFS_I(inode)->root)
4463 * On unlink we must make sure our immediate parent directory
4464 * inode is fully logged. This is to prevent leaving dangling
4465 * directory index entries and a wrong directory inode's i_size.
4466 * Not doing so can result in a directory being impossible to
4467 * delete after log replay (rmdir will always fail with error -ENOTEMPTY).
4470 if (did_unlink && parent == first_parent)
4471 inode_only = LOG_INODE_ALL;
4473 inode_only = LOG_INODE_EXISTS;
4475 if (BTRFS_I(inode)->generation >
4476 root->fs_info->last_trans_committed ||
4477 inode_only == LOG_INODE_ALL) {
4478 ret = btrfs_log_inode(trans, root, inode, inode_only,
4483 if (IS_ROOT(parent))
4486 parent = dget_parent(parent);
4488 old_parent = parent;
4494 btrfs_set_log_full_commit(root->fs_info, trans);
4499 btrfs_remove_log_ctx(root, ctx);
4500 btrfs_end_log_trans(root);
4506 * it is not safe to log the dentry if the chunk root has added new
4507 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
4508 * If this returns 1, you must commit the transaction to safely get your data on disk.
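/*
 * Sketch of the documented calling pattern (hypothetical caller, not the
 * actual fsync code):
 *
 *	ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_sync_log(trans, root, &ctx);
 *	else if (ret == 1)
 *		ret = btrfs_commit_transaction(trans, root);
 */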
4511 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
4512 struct btrfs_root *root, struct dentry *dentry,
4515 struct btrfs_log_ctx *ctx)
4517 struct dentry *parent = dget_parent(dentry);
4520 ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent,
4521 start, end, 0, ctx);
4528 * should be called during mount to recover and replay any log trees
4531 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
4534 struct btrfs_path *path;
4535 struct btrfs_trans_handle *trans;
4536 struct btrfs_key key;
4537 struct btrfs_key found_key;
4538 struct btrfs_key tmp_key;
4539 struct btrfs_root *log;
4540 struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
4541 struct walk_control wc = {
4542 .process_func = process_one_buffer,
4546 path = btrfs_alloc_path();
4550 fs_info->log_root_recovering = 1;
4552 trans = btrfs_start_transaction(fs_info->tree_root, 0);
4553 if (IS_ERR(trans)) {
4554 ret = PTR_ERR(trans);
4561 ret = walk_log_tree(trans, log_root_tree, &wc);
4563 btrfs_error(fs_info, ret, "Failed to pin buffers while "
4564 "recovering log root tree.");
4569 key.objectid = BTRFS_TREE_LOG_OBJECTID;
4570 key.offset = (u64)-1;
4571 key.type = BTRFS_ROOT_ITEM_KEY;
4574 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
4577 btrfs_error(fs_info, ret,
4578 "Couldn't find tree log root.");
4582 if (path->slots[0] == 0)
4586 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
4588 btrfs_release_path(path);
4589 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
4592 log = btrfs_read_fs_root(log_root_tree, &found_key);
4595 btrfs_error(fs_info, ret,
4596 "Couldn't read tree log root.");
4600 tmp_key.objectid = found_key.offset;
4601 tmp_key.type = BTRFS_ROOT_ITEM_KEY;
4602 tmp_key.offset = (u64)-1;
4604 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
4605 if (IS_ERR(wc.replay_dest)) {
4606 ret = PTR_ERR(wc.replay_dest);
4607 free_extent_buffer(log->node);
4608 free_extent_buffer(log->commit_root);
4610 btrfs_error(fs_info, ret, "Couldn't read target root "
4611 "for tree log recovery.");
4615 wc.replay_dest->log_root = log;
4616 btrfs_record_root_in_trans(trans, wc.replay_dest);
4617 ret = walk_log_tree(trans, log, &wc);
4619 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
4620 ret = fixup_inode_link_counts(trans, wc.replay_dest,
4624 key.offset = found_key.offset - 1;
4625 wc.replay_dest->log_root = NULL;
4626 free_extent_buffer(log->node);
4627 free_extent_buffer(log->commit_root);
4633 if (found_key.offset == 0)
4636 btrfs_release_path(path);
4638 /* step one is to pin it all, step two is to replay just inodes */
4641 wc.process_func = replay_one_buffer;
4642 wc.stage = LOG_WALK_REPLAY_INODES;
4645 /* step three is to replay everything */
4646 if (wc.stage < LOG_WALK_REPLAY_ALL) {
4651 btrfs_free_path(path);
4653 /* step 4: commit the transaction, which also unpins the blocks */
4654 ret = btrfs_commit_transaction(trans, fs_info->tree_root);
4658 free_extent_buffer(log_root_tree->node);
4659 log_root_tree->log_root = NULL;
4660 fs_info->log_root_recovering = 0;
4661 kfree(log_root_tree);
4666 btrfs_end_transaction(wc.trans, fs_info->tree_root);
4667 btrfs_free_path(path);
4672 * there are some corner cases where we want to force a full
4673 * commit instead of allowing a directory to be logged.
4675 * They revolve around files that were unlinked from the directory, and
4676 * this function updates the parent directory so that a full commit is
4677 * properly done if it is fsync'd later after the unlinks are done.
4679 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
4680 struct inode *dir, struct inode *inode,
4684 * when we're logging a file, if it hasn't been renamed
4685 * or unlinked, and its inode is fully committed on disk,
4686 * we don't have to worry about walking up the directory chain
4687 * to log its parents.
4689 * So, we use the last_unlink_trans field to put this transid
4690 * into the file. When the file is logged we check it and
4691 * don't log the parents if the file is fully on disk.
4693 if (S_ISREG(inode->i_mode))
4694 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4697 * if this directory was already logged any new
4698 * names for this file/dir will get recorded
4701 if (BTRFS_I(dir)->logged_trans == trans->transid)
4705 * if the inode we're about to unlink was logged,
4706 * the log will be properly updated for any new names
4708 if (BTRFS_I(inode)->logged_trans == trans->transid)
4712 * when renaming files across directories, if the directory
4713 * we're unlinking from gets fsync'd later on, there's
4714 * no way to find the destination directory later and fsync it
4715 * properly. So, we have to be conservative and force commits
4716 * so the new name gets discovered.
4721 /* we can safely do the unlink without any special recording */
4725 BTRFS_I(dir)->last_unlink_trans = trans->transid;
4729 * Call this after adding a new name for a file and it will properly
4730 * update the log to reflect the new name.
4732 * It will return zero if all goes well, and it will return 1 if a
4733 * full transaction commit is required.
4735 int btrfs_log_new_name(struct btrfs_trans_handle *trans,
4736 struct inode *inode, struct inode *old_dir,
4737 struct dentry *parent)
4739 struct btrfs_root * root = BTRFS_I(inode)->root;
4742 * this will force the logging code to walk the dentry chain up to the root
4745 if (S_ISREG(inode->i_mode))
4746 BTRFS_I(inode)->last_unlink_trans = trans->transid;
4749 * if this inode hasn't been logged and the directory we're renaming it
4750 * from hasn't been logged, we don't need to log it
4752 if (BTRFS_I(inode)->logged_trans <=
4753 root->fs_info->last_trans_committed &&
4754 (!old_dir || BTRFS_I(old_dir)->logged_trans <=
4755 root->fs_info->last_trans_committed))
4758 return btrfs_log_inode_parent(trans, root, inode, parent, 0,
4759 LLONG_MAX, 1, NULL);