2 * Copyright (C) 2012 Alexander Block. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include <linux/bsearch.h>
21 #include <linux/file.h>
22 #include <linux/sort.h>
23 #include <linux/mount.h>
24 #include <linux/xattr.h>
25 #include <linux/posix_acl_xattr.h>
26 #include <linux/radix-tree.h>
27 #include <linux/vmalloc.h>
28 #include <linux/string.h>
29 #include <linux/compat.h>
36 #include "btrfs_inode.h"
37 #include "transaction.h"
38 #include "compression.h"
41 * An fs_path is a helper to dynamically build path names of unknown size.
42 * It reallocates the internal buffer on demand.
43 * It allows fast adding of path elements on the right side (normal path) and
44 * fast adding to the left side (reversed path). A reversed path can also be
45 * unreversed if needed.
54 unsigned short buf_len:15;
55 unsigned short reversed:1;
59 * Average path length does not exceed 200 bytes, so we'll get
60 * better packing in the slab and a higher chance to satisfy
61 * an allocation later during send.
66 #define FS_PATH_INLINE_SIZE \
67 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
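/*
 * Rough usage sketch (illustrative only, not taken from a call site in this
 * file): building the path "a/b/c" leaf-to-root with a reversed fs_path:
 *
 *	p = fs_path_alloc_reversed();
 *	fs_path_add(p, "c", 1);		-> p->start points at "c"
 *	fs_path_add(p, "b", 1);		-> p->start points at "b/c"
 *	fs_path_add(p, "a", 1);		-> p->start points at "a/b/c"
 *	fs_path_unreverse(p);		-> result moved to the front of p->buf
 *	fs_path_free(p);
 *
 * Short paths live in inline_buf; fs_path_ensure_buf() switches to a heap
 * allocation only once a path outgrows FS_PATH_INLINE_SIZE.
 */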
70 /* reused for each extent */
72 struct btrfs_root *root;
79 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128
80 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
83 struct file *send_filp;
89 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
90 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */
92 struct btrfs_root *send_root;
93 struct btrfs_root *parent_root;
94 struct clone_root *clone_roots;
97 /* current state of the compare_tree call */
98 struct btrfs_path *left_path;
99 struct btrfs_path *right_path;
100 struct btrfs_key *cmp_key;
103 * Info about the currently processed inode. In case of deleted inodes,
104 * these are the values from the deleted inode.
109 int cur_inode_new_gen;
110 int cur_inode_deleted;
114 u64 cur_inode_last_extent;
118 struct list_head new_refs;
119 struct list_head deleted_refs;
121 struct radix_tree_root name_cache;
122 struct list_head name_cache_list;
125 struct file_ra_state ra;
130 * We process inodes in increasing order of their numbers, so if before an
131 * incremental send we reverse the parent/child relationship of
132 * directories such that a directory with a lower inode number was
133 * the parent of a directory with a higher inode number, and the one
134 * becoming the new parent got renamed too, we can't rename/move the
135 * directory with lower inode number when we finish processing it - we
136 * must process the directory with higher inode number first, then
137 * rename/move it and then rename/move the directory with lower inode
138 * number. Example follows.
140 * Tree state when the first send was performed:
152 * Tree state when the second (incremental) send is performed:
161 * The sequence of steps that led to the second state was:
163 * mv /a/b/c/d /a/b/c2/d2
164 * mv /a/b/c /a/b/c2/d2/cc
166 * "c" has lower inode number, but we can't move it (2nd mv operation)
167 * before we move "d", which has higher inode number.
169 * So we just memorize which move/rename operations must be performed
170 * later when their respective parent is processed and moved/renamed.
173 /* Indexed by parent directory inode number. */
174 struct rb_root pending_dir_moves;
177 * Reverse index, indexed by the inode number of a directory that
178 * is waiting for the move/rename of its immediate parent before its
179 * own move/rename can be performed.
181 struct rb_root waiting_dir_moves;
184 * A directory that is going to be rm'ed might have a child directory
185 * which is in the pending directory moves index above. In this case,
186 * the directory can only be removed after the move/rename of its child
187 * is performed. Example:
207 * Sequence of steps that led to the send snapshot:
208 * rm -f /a/b/c/foo.txt
210 * mv /a/b/c/x /a/b/YY
213 * When the child is processed, its move/rename is delayed until its
214 * parent is processed (as explained above), but all other operations
215 * like update utimes, chown, chgrp, etc, are performed and the paths
216 * that it uses for those operations must use the orphanized name of
217 * its parent (the directory we're going to rm later), so we need to
218 * memorize that name.
220 * Indexed by the inode number of the directory to be deleted.
222 struct rb_root orphan_dirs;
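	/*
	 * Quick summary of the three indexes above:
	 *   pending_dir_moves - keyed by the parent dir inode number; moves
	 *                       to replay once that parent was processed.
	 *   waiting_dir_moves - keyed by the inode number of a dir whose own
	 *                       move/rename is still pending.
	 *   orphan_dirs       - keyed by the inode number of a dir whose
	 *                       rmdir is delayed until its last pending
	 *                       child move/rename is done.
	 */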
225 struct pending_dir_move {
227 struct list_head list;
231 struct list_head update_refs;
234 struct waiting_dir_move {
238 * There might be some directory that could not be removed because it
239 * was waiting for this directory inode to be moved first. Therefore
240 * after this directory is moved, we can try to rmdir the inode rmdir_ino.
246 struct orphan_dir_info {
252 struct name_cache_entry {
253 struct list_head list;
255 * radix_tree has only 32bit entries but we need to handle 64bit inums.
256 * We use the lower 32bit of the 64bit inum to store it in the tree. If
257 * more than one inum would fall into the same entry, we use radix_list
258 * to store the additional entries. radix_list is also used to store
259 * entries that have the same inum but different generations.
262 struct list_head radix_list;
268 int need_later_update;
273 static void inconsistent_snapshot_error(struct send_ctx *sctx,
274 enum btrfs_compare_tree_result result,
277 const char *result_string;
280 case BTRFS_COMPARE_TREE_NEW:
281 result_string = "new";
283 case BTRFS_COMPARE_TREE_DELETED:
284 result_string = "deleted";
286 case BTRFS_COMPARE_TREE_CHANGED:
287 result_string = "updated";
289 case BTRFS_COMPARE_TREE_SAME:
291 result_string = "unchanged";
295 result_string = "unexpected";
298 btrfs_err(sctx->send_root->fs_info,
299 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
300 result_string, what, sctx->cmp_key->objectid,
301 sctx->send_root->root_key.objectid,
303 sctx->parent_root->root_key.objectid : 0));
306 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
308 static struct waiting_dir_move *
309 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
311 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
313 static int need_send_hole(struct send_ctx *sctx)
315 return (sctx->parent_root && !sctx->cur_inode_new &&
316 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
317 S_ISREG(sctx->cur_inode_mode));
320 static void fs_path_reset(struct fs_path *p)
323 p->start = p->buf + p->buf_len - 1;
333 static struct fs_path *fs_path_alloc(void)
337 p = kmalloc(sizeof(*p), GFP_KERNEL);
341 p->buf = p->inline_buf;
342 p->buf_len = FS_PATH_INLINE_SIZE;
347 static struct fs_path *fs_path_alloc_reversed(void)
359 static void fs_path_free(struct fs_path *p)
363 if (p->buf != p->inline_buf)
368 static int fs_path_len(struct fs_path *p)
370 return p->end - p->start;
373 static int fs_path_ensure_buf(struct fs_path *p, int len)
381 if (p->buf_len >= len)
384 if (len > PATH_MAX) {
389 path_len = p->end - p->start;
390 old_buf_len = p->buf_len;
393 * First time the inline_buf does not suffice
395 if (p->buf == p->inline_buf) {
396 tmp_buf = kmalloc(len, GFP_KERNEL);
398 memcpy(tmp_buf, p->buf, old_buf_len);
400 tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
406 * The real size of the buffer is bigger; this lets the fast path
407 * happen most of the time.
409 p->buf_len = ksize(p->buf);
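	/*
	 * A reversed path keeps its data at the end of the buffer, so after
	 * growing it we relocate the string (including the terminating NUL)
	 * to the end of the enlarged buffer and fix up start/end.
	 */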
412 tmp_buf = p->buf + old_buf_len - path_len - 1;
413 p->end = p->buf + p->buf_len - 1;
414 p->start = p->end - path_len;
415 memmove(p->start, tmp_buf, path_len + 1);
418 p->end = p->start + path_len;
423 static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
429 new_len = p->end - p->start + name_len;
430 if (p->start != p->end)
432 ret = fs_path_ensure_buf(p, new_len);
437 if (p->start != p->end)
439 p->start -= name_len;
440 *prepared = p->start;
442 if (p->start != p->end)
453 static int fs_path_add(struct fs_path *p, const char *name, int name_len)
458 ret = fs_path_prepare_for_add(p, name_len, &prepared);
461 memcpy(prepared, name, name_len);
467 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
472 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
475 memcpy(prepared, p2->start, p2->end - p2->start);
481 static int fs_path_add_from_extent_buffer(struct fs_path *p,
482 struct extent_buffer *eb,
483 unsigned long off, int len)
488 ret = fs_path_prepare_for_add(p, len, &prepared);
492 read_extent_buffer(eb, prepared, off, len);
498 static int fs_path_copy(struct fs_path *p, struct fs_path *from)
502 p->reversed = from->reversed;
505 ret = fs_path_add_path(p, from);
511 static void fs_path_unreverse(struct fs_path *p)
520 len = p->end - p->start;
522 p->end = p->start + len;
523 memmove(p->start, tmp, len + 1);
527 static struct btrfs_path *alloc_path_for_send(void)
529 struct btrfs_path *path;
531 path = btrfs_alloc_path();
534 path->search_commit_root = 1;
535 path->skip_locking = 1;
536 path->need_commit_sem = 1;
540 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
546 ret = kernel_write(filp, buf + pos, len - pos, off);
547 /* TODO handle that correctly */
548 /*if (ret == -ERESTARTSYS) {
562 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
564 struct btrfs_tlv_header *hdr;
565 int total_len = sizeof(*hdr) + len;
566 int left = sctx->send_max_size - sctx->send_size;
568 if (unlikely(left < total_len))
571 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
572 hdr->tlv_type = cpu_to_le16(attr);
573 hdr->tlv_len = cpu_to_le16(len);
574 memcpy(hdr + 1, data, len);
575 sctx->send_size += total_len;
580 #define TLV_PUT_DEFINE_INT(bits) \
581 static int tlv_put_u##bits(struct send_ctx *sctx, \
582 u##bits attr, u##bits value) \
584 __le##bits __tmp = cpu_to_le##bits(value); \
585 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \
588 TLV_PUT_DEFINE_INT(64)
590 static int tlv_put_string(struct send_ctx *sctx, u16 attr,
591 const char *str, int len)
595 return tlv_put(sctx, attr, str, len);
598 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
601 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
604 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
605 struct extent_buffer *eb,
606 struct btrfs_timespec *ts)
608 struct btrfs_timespec bts;
609 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
610 return tlv_put(sctx, attr, &bts, sizeof(bts));
614 #define TLV_PUT(sctx, attrtype, attrlen, data) \
616 ret = tlv_put(sctx, attrtype, attrlen, data); \
618 goto tlv_put_failure; \
621 #define TLV_PUT_INT(sctx, attrtype, bits, value) \
623 ret = tlv_put_u##bits(sctx, attrtype, value); \
625 goto tlv_put_failure; \
628 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
629 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
630 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
631 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
632 #define TLV_PUT_STRING(sctx, attrtype, str, len) \
634 ret = tlv_put_string(sctx, attrtype, str, len); \
636 goto tlv_put_failure; \
638 #define TLV_PUT_PATH(sctx, attrtype, p) \
640 ret = tlv_put_string(sctx, attrtype, p->start, \
641 p->end - p->start); \
643 goto tlv_put_failure; \
645 #define TLV_PUT_UUID(sctx, attrtype, uuid) \
647 ret = tlv_put_uuid(sctx, attrtype, uuid); \
649 goto tlv_put_failure; \
651 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
653 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
655 goto tlv_put_failure; \
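/*
 * Rough sketch of the resulting stream layout, as assembled by tlv_put()
 * above and send_header()/begin_cmd()/send_cmd() below:
 *
 *	btrfs_stream_header { magic, version }
 *	btrfs_cmd_header    { len = size of attribute data, cmd,
 *			      crc32c over the whole command }
 *	  btrfs_tlv_header  { tlv_type, tlv_len } + attribute data
 *	  btrfs_tlv_header  { tlv_type, tlv_len } + attribute data
 *	  ...
 *	btrfs_cmd_header    { ... }	(next command)
 *	  ...
 */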
658 static int send_header(struct send_ctx *sctx)
660 struct btrfs_stream_header hdr;
662 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
663 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);
665 return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
670 * For each command/item we want to send to userspace, we call this function.
672 static int begin_cmd(struct send_ctx *sctx, int cmd)
674 struct btrfs_cmd_header *hdr;
676 if (WARN_ON(!sctx->send_buf))
679 BUG_ON(sctx->send_size);
681 sctx->send_size += sizeof(*hdr);
682 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
683 hdr->cmd = cpu_to_le16(cmd);
688 static int send_cmd(struct send_ctx *sctx)
691 struct btrfs_cmd_header *hdr;
694 hdr = (struct btrfs_cmd_header *)sctx->send_buf;
695 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
698 crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
699 hdr->crc = cpu_to_le32(crc);
701 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
704 sctx->total_send_size += sctx->send_size;
705 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
712 * Sends a move instruction to user space
714 static int send_rename(struct send_ctx *sctx,
715 struct fs_path *from, struct fs_path *to)
717 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
720 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);
722 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
726 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
727 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);
729 ret = send_cmd(sctx);
737 * Sends a link instruction to user space
739 static int send_link(struct send_ctx *sctx,
740 struct fs_path *path, struct fs_path *lnk)
742 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
745 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);
747 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
751 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
752 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);
754 ret = send_cmd(sctx);
762 * Sends an unlink instruction to user space
764 static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
766 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
769 btrfs_debug(fs_info, "send_unlink %s", path->start);
771 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
775 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
777 ret = send_cmd(sctx);
785 * Sends a rmdir instruction to user space
787 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
789 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
792 btrfs_debug(fs_info, "send_rmdir %s", path->start);
794 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
798 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
800 ret = send_cmd(sctx);
808 * Helper function to retrieve some fields from an inode item.
810 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
811 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
815 struct btrfs_inode_item *ii;
816 struct btrfs_key key;
819 key.type = BTRFS_INODE_ITEM_KEY;
821 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
828 ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
829 struct btrfs_inode_item);
831 *size = btrfs_inode_size(path->nodes[0], ii);
833 *gen = btrfs_inode_generation(path->nodes[0], ii);
835 *mode = btrfs_inode_mode(path->nodes[0], ii);
837 *uid = btrfs_inode_uid(path->nodes[0], ii);
839 *gid = btrfs_inode_gid(path->nodes[0], ii);
841 *rdev = btrfs_inode_rdev(path->nodes[0], ii);
846 static int get_inode_info(struct btrfs_root *root,
847 u64 ino, u64 *size, u64 *gen,
848 u64 *mode, u64 *uid, u64 *gid,
851 struct btrfs_path *path;
854 path = alloc_path_for_send();
857 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
859 btrfs_free_path(path);
863 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
868 * Helper function to iterate the entries in ONE btrfs_inode_ref or
869 * btrfs_inode_extref.
870 * The iterate callback may return a non-zero value to stop iteration. This can
871 * be a negative value for error codes or 1 to simply stop it.
873 * path must point to the INODE_REF or INODE_EXTREF when called.
875 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
876 struct btrfs_key *found_key, int resolve,
877 iterate_inode_ref_t iterate, void *ctx)
879 struct extent_buffer *eb = path->nodes[0];
880 struct btrfs_item *item;
881 struct btrfs_inode_ref *iref;
882 struct btrfs_inode_extref *extref;
883 struct btrfs_path *tmp_path;
887 int slot = path->slots[0];
894 unsigned long name_off;
895 unsigned long elem_size;
898 p = fs_path_alloc_reversed();
902 tmp_path = alloc_path_for_send();
909 if (found_key->type == BTRFS_INODE_REF_KEY) {
910 ptr = (unsigned long)btrfs_item_ptr(eb, slot,
911 struct btrfs_inode_ref);
912 item = btrfs_item_nr(slot);
913 total = btrfs_item_size(eb, item);
914 elem_size = sizeof(*iref);
916 ptr = btrfs_item_ptr_offset(eb, slot);
917 total = btrfs_item_size_nr(eb, slot);
918 elem_size = sizeof(*extref);
921 while (cur < total) {
924 if (found_key->type == BTRFS_INODE_REF_KEY) {
925 iref = (struct btrfs_inode_ref *)(ptr + cur);
926 name_len = btrfs_inode_ref_name_len(eb, iref);
927 name_off = (unsigned long)(iref + 1);
928 index = btrfs_inode_ref_index(eb, iref);
929 dir = found_key->offset;
931 extref = (struct btrfs_inode_extref *)(ptr + cur);
932 name_len = btrfs_inode_extref_name_len(eb, extref);
933 name_off = (unsigned long)&extref->name;
934 index = btrfs_inode_extref_index(eb, extref);
935 dir = btrfs_inode_extref_parent(eb, extref);
939 start = btrfs_ref_to_path(root, tmp_path, name_len,
943 ret = PTR_ERR(start);
946 if (start < p->buf) {
947 /* overflow, try again with a larger buffer */
948 ret = fs_path_ensure_buf(p,
949 p->buf_len + p->buf - start);
952 start = btrfs_ref_to_path(root, tmp_path,
957 ret = PTR_ERR(start);
960 BUG_ON(start < p->buf);
964 ret = fs_path_add_from_extent_buffer(p, eb, name_off,
970 cur += elem_size + name_len;
971 ret = iterate(num, dir, index, p, ctx);
978 btrfs_free_path(tmp_path);
983 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
984 const char *name, int name_len,
985 const char *data, int data_len,
989 * Helper function to iterate the entries in ONE btrfs_dir_item.
990 * The iterate callback may return a non-zero value to stop iteration. This can
991 * be a negative value for error codes or 1 to simply stop it.
993 * path must point to the dir item when called.
995 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
996 iterate_dir_item_t iterate, void *ctx)
999 struct extent_buffer *eb;
1000 struct btrfs_item *item;
1001 struct btrfs_dir_item *di;
1002 struct btrfs_key di_key;
1015 * Start with a small buffer (1 page). If later we end up needing more
1016 * space, which can happen for xattrs on a fs with a leaf size greater
1017 * than the page size, attempt to increase the buffer. Typically xattr
1021 buf = kmalloc(buf_len, GFP_KERNEL);
1027 eb = path->nodes[0];
1028 slot = path->slots[0];
1029 item = btrfs_item_nr(slot);
1030 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
1033 total = btrfs_item_size(eb, item);
1036 while (cur < total) {
1037 name_len = btrfs_dir_name_len(eb, di);
1038 data_len = btrfs_dir_data_len(eb, di);
1039 type = btrfs_dir_type(eb, di);
1040 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
1042 if (type == BTRFS_FT_XATTR) {
1043 if (name_len > XATTR_NAME_MAX) {
1044 ret = -ENAMETOOLONG;
1047 if (name_len + data_len >
1048 BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
1056 if (name_len + data_len > PATH_MAX) {
1057 ret = -ENAMETOOLONG;
1062 ret = btrfs_is_name_len_valid(eb, path->slots[0],
1063 (unsigned long)(di + 1), name_len + data_len);
1068 if (name_len + data_len > buf_len) {
1069 buf_len = name_len + data_len;
1070 if (is_vmalloc_addr(buf)) {
1074 char *tmp = krealloc(buf, buf_len,
1075 GFP_KERNEL | __GFP_NOWARN);
1082 buf = kvmalloc(buf_len, GFP_KERNEL);
1090 read_extent_buffer(eb, buf, (unsigned long)(di + 1),
1091 name_len + data_len);
1093 len = sizeof(*di) + name_len + data_len;
1094 di = (struct btrfs_dir_item *)((char *)di + len);
1097 ret = iterate(num, &di_key, buf, name_len, buf + name_len,
1098 data_len, type, ctx);
1114 static int __copy_first_ref(int num, u64 dir, int index,
1115 struct fs_path *p, void *ctx)
1118 struct fs_path *pt = ctx;
1120 ret = fs_path_copy(pt, p);
1124 /* we want the first only */
1129 * Retrieve the first path of an inode. If an inode has more than one
1130 * ref/hardlink, this is ignored.
1132 static int get_inode_path(struct btrfs_root *root,
1133 u64 ino, struct fs_path *path)
1136 struct btrfs_key key, found_key;
1137 struct btrfs_path *p;
1139 p = alloc_path_for_send();
1143 fs_path_reset(path);
1146 key.type = BTRFS_INODE_REF_KEY;
1149 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
1156 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
1157 if (found_key.objectid != ino ||
1158 (found_key.type != BTRFS_INODE_REF_KEY &&
1159 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1164 ret = iterate_inode_ref(root, p, &found_key, 1,
1165 __copy_first_ref, path);
1175 struct backref_ctx {
1176 struct send_ctx *sctx;
1178 struct btrfs_path *path;
1179 /* number of total found references */
1183 * Used for clones found in send_root. Clones found behind cur_objectid
1184 * and cur_offset are not considered allowed clones.
1189 /* may be truncated in case it's the last extent in a file */
1192 /* data offset in the file extent item */
1195 /* Just to check for bugs in backref resolving */
1199 static int __clone_root_cmp_bsearch(const void *key, const void *elt)
1201 u64 root = (u64)(uintptr_t)key;
1202 struct clone_root *cr = (struct clone_root *)elt;
1204 if (root < cr->root->objectid)
1206 if (root > cr->root->objectid)
1211 static int __clone_root_cmp_sort(const void *e1, const void *e2)
1213 struct clone_root *cr1 = (struct clone_root *)e1;
1214 struct clone_root *cr2 = (struct clone_root *)e2;
1216 if (cr1->root->objectid < cr2->root->objectid)
1218 if (cr1->root->objectid > cr2->root->objectid)
1224 * Called for every backref that is found for the current extent.
1225 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1227 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
1229 struct backref_ctx *bctx = ctx_;
1230 struct clone_root *found;
1234 /* First check if the root is in the list of accepted clone sources */
1235 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
1236 bctx->sctx->clone_roots_cnt,
1237 sizeof(struct clone_root),
1238 __clone_root_cmp_bsearch);
1242 if (found->root == bctx->sctx->send_root &&
1243 ino == bctx->cur_objectid &&
1244 offset == bctx->cur_offset) {
1245 bctx->found_itself = 1;
1249 * There are inodes that have extents that lie behind their i_size. Don't
1250 * accept clones from these extents.
1252 ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
1254 btrfs_release_path(bctx->path);
1258 if (offset + bctx->data_offset + bctx->extent_len > i_size)
1262 * Make sure we don't consider clones from send_root that are
1263 * behind the current inode/offset.
1265 if (found->root == bctx->sctx->send_root) {
1267 * TODO for the moment we don't accept clones from the inode
1268 * that is currently being sent. We may change this when
1269 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1272 if (ino >= bctx->cur_objectid)
1275 if (ino > bctx->cur_objectid)
1277 if (offset + bctx->extent_len > bctx->cur_offset)
1283 found->found_refs++;
1284 if (ino < found->ino) {
1286 found->offset = offset;
1287 } else if (found->ino == ino) {
1289 * Same extent found more than once in the same file.
1291 if (found->offset > offset + bctx->extent_len)
1292 found->offset = offset;
1299 * Given an inode, offset and extent item, it finds a good clone for a clone
1300 * instruction. Returns -ENOENT when none could be found. The function makes
1301 * sure that the returned clone is usable at the point where sending is at the
1302 * moment. This means that no clones are accepted which lie behind the current
1305 * path must point to the extent item when called.
1307 static int find_extent_clone(struct send_ctx *sctx,
1308 struct btrfs_path *path,
1309 u64 ino, u64 data_offset,
1311 struct clone_root **found)
1313 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
1319 u64 extent_item_pos;
1321 struct btrfs_file_extent_item *fi;
1322 struct extent_buffer *eb = path->nodes[0];
1323 struct backref_ctx *backref_ctx = NULL;
1324 struct clone_root *cur_clone_root;
1325 struct btrfs_key found_key;
1326 struct btrfs_path *tmp_path;
1330 tmp_path = alloc_path_for_send();
1334 /* We only use this path under the commit sem */
1335 tmp_path->need_commit_sem = 0;
1337 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
1343 backref_ctx->path = tmp_path;
1345 if (data_offset >= ino_size) {
1347 * There may be extents that lie behind the file's size.
1348 * I at least had this in combination with snapshotting while
1349 * writing large files.
1355 fi = btrfs_item_ptr(eb, path->slots[0],
1356 struct btrfs_file_extent_item);
1357 extent_type = btrfs_file_extent_type(eb, fi);
1358 if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1362 compressed = btrfs_file_extent_compression(eb, fi);
1364 num_bytes = btrfs_file_extent_num_bytes(eb, fi);
1365 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1366 if (disk_byte == 0) {
1370 logical = disk_byte + btrfs_file_extent_offset(eb, fi);
1372 down_read(&fs_info->commit_root_sem);
1373 ret = extent_from_logical(fs_info, disk_byte, tmp_path,
1374 &found_key, &flags);
1375 up_read(&fs_info->commit_root_sem);
1376 btrfs_release_path(tmp_path);
1380 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
1386 * Setup the clone roots.
1388 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1389 cur_clone_root = sctx->clone_roots + i;
1390 cur_clone_root->ino = (u64)-1;
1391 cur_clone_root->offset = 0;
1392 cur_clone_root->found_refs = 0;
1395 backref_ctx->sctx = sctx;
1396 backref_ctx->found = 0;
1397 backref_ctx->cur_objectid = ino;
1398 backref_ctx->cur_offset = data_offset;
1399 backref_ctx->found_itself = 0;
1400 backref_ctx->extent_len = num_bytes;
1402 * For non-compressed extents iterate_extent_inodes() gives us extent
1403 * offsets that already take into account the data offset, but not for
1404 * compressed extents, since the offset is logical and not relative to
1405 * the physical extent locations. We must take this into account to
1406 * avoid sending clone offsets that go beyond the source file's size,
1407 * which would result in the clone ioctl failing with -EINVAL on the
1410 if (compressed == BTRFS_COMPRESS_NONE)
1411 backref_ctx->data_offset = 0;
1413 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);
1416 * The last extent of a file may be too large due to page alignment.
1417 * We need to adjust extent_len in this case so that the checks in
1418 * __iterate_backrefs work.
1420 if (data_offset + num_bytes >= ino_size)
1421 backref_ctx->extent_len = ino_size - data_offset;
1424 * Now collect all backrefs.
1426 if (compressed == BTRFS_COMPRESS_NONE)
1427 extent_item_pos = logical - found_key.objectid;
1429 extent_item_pos = 0;
1430 ret = iterate_extent_inodes(fs_info, found_key.objectid,
1431 extent_item_pos, 1, __iterate_backrefs,
1437 if (!backref_ctx->found_itself) {
1438 /* found a bug in backref code? */
1441 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
1442 ino, data_offset, disk_byte, found_key.objectid);
1446 btrfs_debug(fs_info,
1447 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
1448 data_offset, ino, num_bytes, logical);
1450 if (!backref_ctx->found)
1451 btrfs_debug(fs_info, "no clones found");
1453 cur_clone_root = NULL;
1454 for (i = 0; i < sctx->clone_roots_cnt; i++) {
1455 if (sctx->clone_roots[i].found_refs) {
1456 if (!cur_clone_root)
1457 cur_clone_root = sctx->clone_roots + i;
1458 else if (sctx->clone_roots[i].root == sctx->send_root)
1459 /* prefer clones from send_root over others */
1460 cur_clone_root = sctx->clone_roots + i;
1465 if (cur_clone_root) {
1466 *found = cur_clone_root;
1473 btrfs_free_path(tmp_path);
1478 static int read_symlink(struct btrfs_root *root,
1480 struct fs_path *dest)
1483 struct btrfs_path *path;
1484 struct btrfs_key key;
1485 struct btrfs_file_extent_item *ei;
1491 path = alloc_path_for_send();
1496 key.type = BTRFS_EXTENT_DATA_KEY;
1498 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1503 * An empty symlink inode. Can happen in rare error paths when
1504 * creating a symlink (transaction committed before the inode
1505 * eviction handler removed the symlink inode items and a crash
1506 * happened in between or the subvol was snapshotted in between).
1507 * Print an informative message to dmesg/syslog so that the user
1508 * can delete the symlink.
1510 btrfs_err(root->fs_info,
1511 "Found empty symlink inode %llu at root %llu",
1512 ino, root->root_key.objectid);
1517 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
1518 struct btrfs_file_extent_item);
1519 type = btrfs_file_extent_type(path->nodes[0], ei);
1520 compression = btrfs_file_extent_compression(path->nodes[0], ei);
1521 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
1522 BUG_ON(compression);
1524 off = btrfs_file_extent_inline_start(ei);
1525 len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);
1527 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);
1530 btrfs_free_path(path);
1535 * Helper function to generate a file name that is unique in the root of
1536 * send_root and parent_root. This is used to generate names for orphan inodes.
1538 static int gen_unique_name(struct send_ctx *sctx,
1540 struct fs_path *dest)
1543 struct btrfs_path *path;
1544 struct btrfs_dir_item *di;
1549 path = alloc_path_for_send();
1554 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
1556 ASSERT(len < sizeof(tmp));
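	/*
	 * e.g. ino 257 / gen 5 would first try "o257-5-0" (hypothetical
	 * values); the trailing counter is bumped and the lookup retried
	 * until the name is unique in both send_root and parent_root.
	 */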
1558 di = btrfs_lookup_dir_item(NULL, sctx->send_root,
1559 path, BTRFS_FIRST_FREE_OBJECTID,
1560 tmp, strlen(tmp), 0);
1561 btrfs_release_path(path);
1567 /* not unique, try again */
1572 if (!sctx->parent_root) {
1578 di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
1579 path, BTRFS_FIRST_FREE_OBJECTID,
1580 tmp, strlen(tmp), 0);
1581 btrfs_release_path(path);
1587 /* not unique, try again */
1595 ret = fs_path_add(dest, tmp, strlen(tmp));
1598 btrfs_free_path(path);
1603 inode_state_no_change,
1604 inode_state_will_create,
1605 inode_state_did_create,
1606 inode_state_will_delete,
1607 inode_state_did_delete,
1610 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
1618 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
1620 if (ret < 0 && ret != -ENOENT)
1624 if (!sctx->parent_root) {
1625 right_ret = -ENOENT;
1627 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
1628 NULL, NULL, NULL, NULL);
1629 if (ret < 0 && ret != -ENOENT)
1634 if (!left_ret && !right_ret) {
1635 if (left_gen == gen && right_gen == gen) {
1636 ret = inode_state_no_change;
1637 } else if (left_gen == gen) {
1638 if (ino < sctx->send_progress)
1639 ret = inode_state_did_create;
1641 ret = inode_state_will_create;
1642 } else if (right_gen == gen) {
1643 if (ino < sctx->send_progress)
1644 ret = inode_state_did_delete;
1646 ret = inode_state_will_delete;
1650 } else if (!left_ret) {
1651 if (left_gen == gen) {
1652 if (ino < sctx->send_progress)
1653 ret = inode_state_did_create;
1655 ret = inode_state_will_create;
1659 } else if (!right_ret) {
1660 if (right_gen == gen) {
1661 if (ino < sctx->send_progress)
1662 ret = inode_state_did_delete;
1664 ret = inode_state_will_delete;
1676 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
1680 if (ino == BTRFS_FIRST_FREE_OBJECTID)
1683 ret = get_cur_inode_state(sctx, ino, gen);
1687 if (ret == inode_state_no_change ||
1688 ret == inode_state_did_create ||
1689 ret == inode_state_will_delete)
1699 * Helper function to lookup a dir item in a dir.
1701 static int lookup_dir_item_inode(struct btrfs_root *root,
1702 u64 dir, const char *name, int name_len,
1707 struct btrfs_dir_item *di;
1708 struct btrfs_key key;
1709 struct btrfs_path *path;
1711 path = alloc_path_for_send();
1715 di = btrfs_lookup_dir_item(NULL, root, path,
1716 dir, name, name_len, 0);
1725 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
1726 if (key.type == BTRFS_ROOT_ITEM_KEY) {
1730 *found_inode = key.objectid;
1731 *found_type = btrfs_dir_type(path->nodes[0], di);
1734 btrfs_free_path(path);
1739 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1740 * generation of the parent dir and the name of the dir entry.
1742 static int get_first_ref(struct btrfs_root *root, u64 ino,
1743 u64 *dir, u64 *dir_gen, struct fs_path *name)
1746 struct btrfs_key key;
1747 struct btrfs_key found_key;
1748 struct btrfs_path *path;
1752 path = alloc_path_for_send();
1757 key.type = BTRFS_INODE_REF_KEY;
1760 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
1764 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1766 if (ret || found_key.objectid != ino ||
1767 (found_key.type != BTRFS_INODE_REF_KEY &&
1768 found_key.type != BTRFS_INODE_EXTREF_KEY)) {
1773 if (found_key.type == BTRFS_INODE_REF_KEY) {
1774 struct btrfs_inode_ref *iref;
1775 iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1776 struct btrfs_inode_ref);
1777 len = btrfs_inode_ref_name_len(path->nodes[0], iref);
1778 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1779 (unsigned long)(iref + 1),
1781 parent_dir = found_key.offset;
1783 struct btrfs_inode_extref *extref;
1784 extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
1785 struct btrfs_inode_extref);
1786 len = btrfs_inode_extref_name_len(path->nodes[0], extref);
1787 ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
1788 (unsigned long)&extref->name, len);
1789 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
1793 btrfs_release_path(path);
1796 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
1805 btrfs_free_path(path);
1809 static int is_first_ref(struct btrfs_root *root,
1811 const char *name, int name_len)
1814 struct fs_path *tmp_name;
1817 tmp_name = fs_path_alloc();
1821 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
1825 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
1830 ret = !memcmp(tmp_name->start, name, name_len);
1833 fs_path_free(tmp_name);
1838 * Used by process_recorded_refs to determine if a new ref would overwrite an
1839 * already existing ref. In case it detects an overwrite, it returns the
1840 * inode/gen in who_ino/who_gen.
1841 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1842 * to make sure later references to the overwritten inode are possible.
1843 * Orphanizing is however only required for the first ref of an inode.
1844 * process_recorded_refs does an additional is_first_ref check to see if
1845 * orphanizing is really required.
1847 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
1848 const char *name, int name_len,
1849 u64 *who_ino, u64 *who_gen, u64 *who_mode)
1853 u64 other_inode = 0;
1856 if (!sctx->parent_root)
1859 ret = is_inode_existent(sctx, dir, dir_gen);
1864 * If we have a parent root we need to verify that the parent dir was
1865 * not deleted and then re-created; if it was, then we have no overwrite
1866 * and we can just unlink this entry.
1868 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
1869 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
1871 if (ret < 0 && ret != -ENOENT)
1881 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
1882 &other_inode, &other_type);
1883 if (ret < 0 && ret != -ENOENT)
1891 * Check if the overwritten ref was already processed. If yes, the ref
1892 * was already unlinked/moved, so we can safely assume that we will not
1893 * overwrite anything at this point in time.
1895 if (other_inode > sctx->send_progress ||
1896 is_waiting_for_move(sctx, other_inode)) {
1897 ret = get_inode_info(sctx->parent_root, other_inode, NULL,
1898 who_gen, who_mode, NULL, NULL, NULL);
1903 *who_ino = other_inode;
1913 * Checks if the ref was overwritten by an already processed inode. This is
1914 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
1915 * thus the orphan name needs to be used.
1916 * process_recorded_refs also uses it to avoid unlinking of refs that were
1919 static int did_overwrite_ref(struct send_ctx *sctx,
1920 u64 dir, u64 dir_gen,
1921 u64 ino, u64 ino_gen,
1922 const char *name, int name_len)
1929 if (!sctx->parent_root)
1932 ret = is_inode_existent(sctx, dir, dir_gen);
1936 if (dir != BTRFS_FIRST_FREE_OBJECTID) {
1937 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
1939 if (ret < 0 && ret != -ENOENT)
1949 /* check if the ref was overwritten by another ref */
1950 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
1951 &ow_inode, &other_type);
1952 if (ret < 0 && ret != -ENOENT)
1955 /* was never and will never be overwritten */
1960 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
1965 if (ow_inode == ino && gen == ino_gen) {
1971 * We know that it is or will be overwritten. Check this now.
1972 * The current inode being processed might have been the one that caused
1973 * inode 'ino' to be orphanized, therefore check if ow_inode matches
1974 * the current inode being processed.
1976 if ((ow_inode < sctx->send_progress) ||
1977 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
1978 gen == sctx->cur_inode_gen))
1988 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1989 * that got overwritten. This is used by process_recorded_refs to determine
1990 * if it has to use the path as returned by get_cur_path or the orphan name.
1992 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
1995 struct fs_path *name = NULL;
1999 if (!sctx->parent_root)
2002 name = fs_path_alloc();
2006 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
2010 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
2011 name->start, fs_path_len(name));
2019 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
2020 * so we need to do some special handling in case we have clashes. This function
2021 * takes care of this with the help of name_cache_entry::radix_list.
2022 * In case of error, nce is kfreed.
2024 static int name_cache_insert(struct send_ctx *sctx,
2025 struct name_cache_entry *nce)
2028 struct list_head *nce_head;
2030 nce_head = radix_tree_lookup(&sctx->name_cache,
2031 (unsigned long)nce->ino);
2033 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
2038 INIT_LIST_HEAD(nce_head);
2040 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
2047 list_add_tail(&nce->radix_list, nce_head);
2048 list_add_tail(&nce->list, &sctx->name_cache_list);
2049 sctx->name_cache_size++;
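/*
 * Illustrative sketch (hypothetical inums): on a 32bit kernel the cast to
 * unsigned long truncates the inum, so both inum 0x100000100 and inum 0x100
 * map to radix tree index 0x100. Both name_cache_entry objects then hang off
 * the same nce_head list, and name_cache_search() has to compare ino (and
 * gen) while walking that list.
 */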
2054 static void name_cache_delete(struct send_ctx *sctx,
2055 struct name_cache_entry *nce)
2057 struct list_head *nce_head;
2059 nce_head = radix_tree_lookup(&sctx->name_cache,
2060 (unsigned long)nce->ino);
2062 btrfs_err(sctx->send_root->fs_info,
2063 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
2064 nce->ino, sctx->name_cache_size);
2067 list_del(&nce->radix_list);
2068 list_del(&nce->list);
2069 sctx->name_cache_size--;
2072 * We may not get to the final release of nce_head if the lookup fails
2074 if (nce_head && list_empty(nce_head)) {
2075 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
2080 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
2083 struct list_head *nce_head;
2084 struct name_cache_entry *cur;
2086 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
2090 list_for_each_entry(cur, nce_head, radix_list) {
2091 if (cur->ino == ino && cur->gen == gen)
2098 * Removes the entry from the list and adds it back to the end. This marks the
2099 * entry as recently used so that name_cache_clean_unused does not remove it.
2101 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
2103 list_del(&nce->list);
2104 list_add_tail(&nce->list, &sctx->name_cache_list);
2108 * Remove some entries from the beginning of name_cache_list.
2110 static void name_cache_clean_unused(struct send_ctx *sctx)
2112 struct name_cache_entry *nce;
2114 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
2117 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
2118 nce = list_entry(sctx->name_cache_list.next,
2119 struct name_cache_entry, list);
2120 name_cache_delete(sctx, nce);
2125 static void name_cache_free(struct send_ctx *sctx)
2127 struct name_cache_entry *nce;
2129 while (!list_empty(&sctx->name_cache_list)) {
2130 nce = list_entry(sctx->name_cache_list.next,
2131 struct name_cache_entry, list);
2132 name_cache_delete(sctx, nce);
2138 * Used by get_cur_path for each ref up to the root.
2139 * Returns 0 if it succeeded.
2140 * Returns 1 if the inode does not exist or got overwritten. In that case, the
2141 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
2142 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
2143 * Returns <0 in case of error.
2145 static int __get_cur_name_and_parent(struct send_ctx *sctx,
2149 struct fs_path *dest)
2153 struct name_cache_entry *nce = NULL;
2156 * First check if we already did a call to this function with the same
2157 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2158 * return the cached result.
2160 nce = name_cache_search(sctx, ino, gen);
2162 if (ino < sctx->send_progress && nce->need_later_update) {
2163 name_cache_delete(sctx, nce);
2167 name_cache_used(sctx, nce);
2168 *parent_ino = nce->parent_ino;
2169 *parent_gen = nce->parent_gen;
2170 ret = fs_path_add(dest, nce->name, nce->name_len);
2179 * If the inode does not exist yet, add the orphan name and return 1.
2180 * This should only happen for the parent dir that we determine in
2183 ret = is_inode_existent(sctx, ino, gen);
2188 ret = gen_unique_name(sctx, ino, gen, dest);
2196 * Depending on whether the inode was already processed or not, use
2197 * send_root or parent_root for ref lookup.
2199 if (ino < sctx->send_progress)
2200 ret = get_first_ref(sctx->send_root, ino,
2201 parent_ino, parent_gen, dest);
2203 ret = get_first_ref(sctx->parent_root, ino,
2204 parent_ino, parent_gen, dest);
2209 * Check if the ref was overwritten by an inode's ref that was processed
2210 * earlier. If yes, treat as orphan and return 1.
2212 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
2213 dest->start, dest->end - dest->start);
2217 fs_path_reset(dest);
2218 ret = gen_unique_name(sctx, ino, gen, dest);
2226 * Store the result of the lookup in the name cache.
2228 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
2236 nce->parent_ino = *parent_ino;
2237 nce->parent_gen = *parent_gen;
2238 nce->name_len = fs_path_len(dest);
2240 strcpy(nce->name, dest->start);
2242 if (ino < sctx->send_progress)
2243 nce->need_later_update = 0;
2245 nce->need_later_update = 1;
2247 nce_ret = name_cache_insert(sctx, nce);
2250 name_cache_clean_unused(sctx);
2257 * Magic happens here. This function returns the first ref to an inode as it
2258 * would look while receiving the stream at this point in time.
2259 * We walk the path up to the root. For every inode in between, we check if it
2260 * was already processed/sent. If yes, we continue with the parent as found
2261 * in send_root. If not, we continue with the parent as found in parent_root.
2262 * If we encounter an inode that was deleted at this point in time, we use the
2263 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2264 * that were not created yet and overwritten inodes/refs.
2266 * When do we have orphan inodes:
2267 * 1. When an inode is freshly created and thus no valid refs are available yet
2268 * 2. When a directory lost all its refs (deleted) but still has dir items
2269 * inside which were not processed yet (pending for move/delete). If anyone
2270 * tried to get the path to the dir items, it would get a path inside that
2272 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2273 * of an unprocessed inode. If in that case the first ref would be
2274 * overwritten, the overwritten inode gets "orphanized". Later when we
2275 * process this overwritten inode, it is restored at a new place by moving
2278 * sctx->send_progress tells this function at which point in time receiving
2281 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
2282 struct fs_path *dest)
2285 struct fs_path *name = NULL;
2286 u64 parent_inode = 0;
2290 name = fs_path_alloc();
2297 fs_path_reset(dest);
2299 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
2300 struct waiting_dir_move *wdm;
2302 fs_path_reset(name);
2304 if (is_waiting_for_rm(sctx, ino)) {
2305 ret = gen_unique_name(sctx, ino, gen, name);
2308 ret = fs_path_add_path(dest, name);
2312 wdm = get_waiting_dir_move(sctx, ino);
2313 if (wdm && wdm->orphanized) {
2314 ret = gen_unique_name(sctx, ino, gen, name);
2317 ret = get_first_ref(sctx->parent_root, ino,
2318 &parent_inode, &parent_gen, name);
2320 ret = __get_cur_name_and_parent(sctx, ino, gen,
2330 ret = fs_path_add_path(dest, name);
2341 fs_path_unreverse(dest);
2346 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2348 static int send_subvol_begin(struct send_ctx *sctx)
2351 struct btrfs_root *send_root = sctx->send_root;
2352 struct btrfs_root *parent_root = sctx->parent_root;
2353 struct btrfs_path *path;
2354 struct btrfs_key key;
2355 struct btrfs_root_ref *ref;
2356 struct extent_buffer *leaf;
2360 path = btrfs_alloc_path();
2364 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
2366 btrfs_free_path(path);
2370 key.objectid = send_root->objectid;
2371 key.type = BTRFS_ROOT_BACKREF_KEY;
2374 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
2383 leaf = path->nodes[0];
2384 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2385 if (key.type != BTRFS_ROOT_BACKREF_KEY ||
2386 key.objectid != send_root->objectid) {
2390 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
2391 namelen = btrfs_root_ref_name_len(leaf, ref);
2392 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
2393 btrfs_release_path(path);
2396 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
2400 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
2405 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
2407 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
2408 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2409 sctx->send_root->root_item.received_uuid);
2411 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
2412 sctx->send_root->root_item.uuid);
2414 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
2415 le64_to_cpu(sctx->send_root->root_item.ctransid));
2417 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
2418 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2419 parent_root->root_item.received_uuid);
2421 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
2422 parent_root->root_item.uuid);
2423 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
2424 le64_to_cpu(sctx->parent_root->root_item.ctransid));
2427 ret = send_cmd(sctx);
2431 btrfs_free_path(path);
2436 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
2438 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2442 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size);
2444 p = fs_path_alloc();
2448 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
2452 ret = get_cur_path(sctx, ino, gen, p);
2455 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2456 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
2458 ret = send_cmd(sctx);
2466 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
2468 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2472 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode);
2474 p = fs_path_alloc();
2478 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
2482 ret = get_cur_path(sctx, ino, gen, p);
2485 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2486 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);
2488 ret = send_cmd(sctx);
2496 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
2498 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2502 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu",
2505 p = fs_path_alloc();
2509 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
2513 ret = get_cur_path(sctx, ino, gen, p);
2516 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2517 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
2518 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);
2520 ret = send_cmd(sctx);
2528 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
2530 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2532 struct fs_path *p = NULL;
2533 struct btrfs_inode_item *ii;
2534 struct btrfs_path *path = NULL;
2535 struct extent_buffer *eb;
2536 struct btrfs_key key;
2539 btrfs_debug(fs_info, "send_utimes %llu", ino);
2541 p = fs_path_alloc();
2545 path = alloc_path_for_send();
2552 key.type = BTRFS_INODE_ITEM_KEY;
2554 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2560 eb = path->nodes[0];
2561 slot = path->slots[0];
2562 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
2564 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
2568 ret = get_cur_path(sctx, ino, gen, p);
2571 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2572 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
2573 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
2574 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
2575 /* TODO Add otime support when the otime patches get into upstream */
2577 ret = send_cmd(sctx);
2582 btrfs_free_path(path);
2587 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2588 * a valid path yet because we did not process the refs yet. So, the inode
2589 * is created as an orphan.
2591 static int send_create_inode(struct send_ctx *sctx, u64 ino)
2593 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
2601 btrfs_debug(fs_info, "send_create_inode %llu", ino);
2603 p = fs_path_alloc();
2607 if (ino != sctx->cur_ino) {
2608 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
2613 gen = sctx->cur_inode_gen;
2614 mode = sctx->cur_inode_mode;
2615 rdev = sctx->cur_inode_rdev;
2618 if (S_ISREG(mode)) {
2619 cmd = BTRFS_SEND_C_MKFILE;
2620 } else if (S_ISDIR(mode)) {
2621 cmd = BTRFS_SEND_C_MKDIR;
2622 } else if (S_ISLNK(mode)) {
2623 cmd = BTRFS_SEND_C_SYMLINK;
2624 } else if (S_ISCHR(mode) || S_ISBLK(mode)) {
2625 cmd = BTRFS_SEND_C_MKNOD;
2626 } else if (S_ISFIFO(mode)) {
2627 cmd = BTRFS_SEND_C_MKFIFO;
2628 } else if (S_ISSOCK(mode)) {
2629 cmd = BTRFS_SEND_C_MKSOCK;
2631 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
2632 (int)(mode & S_IFMT));
2637 ret = begin_cmd(sctx, cmd);
2641 ret = gen_unique_name(sctx, ino, gen, p);
2645 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
2646 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);
2648 if (S_ISLNK(mode)) {
2650 ret = read_symlink(sctx->send_root, ino, p);
2653 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
2654 } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
2655 S_ISFIFO(mode) || S_ISSOCK(mode)) {
2656 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
2657 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
2660 ret = send_cmd(sctx);
2672 * We need some special handling for inodes that get processed before the parent
2673 * directory got created. See process_recorded_refs for details.
2674 * This function checks whether we already created the dir out of order.
2676 static int did_create_dir(struct send_ctx *sctx, u64 dir)
2679 struct btrfs_path *path = NULL;
2680 struct btrfs_key key;
2681 struct btrfs_key found_key;
2682 struct btrfs_key di_key;
2683 struct extent_buffer *eb;
2684 struct btrfs_dir_item *di;
2687 path = alloc_path_for_send();
2694 key.type = BTRFS_DIR_INDEX_KEY;
2696 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
2701 eb = path->nodes[0];
2702 slot = path->slots[0];
2703 if (slot >= btrfs_header_nritems(eb)) {
2704 ret = btrfs_next_leaf(sctx->send_root, path);
2707 } else if (ret > 0) {
2714 btrfs_item_key_to_cpu(eb, &found_key, slot);
2715 if (found_key.objectid != key.objectid ||
2716 found_key.type != key.type) {
2721 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
2722 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2724 if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
2725 di_key.objectid < sctx->send_progress) {
2734 btrfs_free_path(path);
2739 * Only creates the inode if it is:
2740 * 1. Not a directory
2741 * 2. Or a directory which was not created already due to out of order
2742 * directories. See did_create_dir and process_recorded_refs for details.
2744 static int send_create_inode_if_needed(struct send_ctx *sctx)
2748 if (S_ISDIR(sctx->cur_inode_mode)) {
2749 ret = did_create_dir(sctx, sctx->cur_ino);
2758 ret = send_create_inode(sctx, sctx->cur_ino);
2766 struct recorded_ref {
2767 struct list_head list;
2769 struct fs_path *full_path;
2775 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path)
2777 ref->full_path = path;
2778 ref->name = (char *)kbasename(ref->full_path->start);
2779 ref->name_len = ref->full_path->end - ref->name;
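	/* e.g. for a full_path of "a/b/foo", name points at "foo" and name_len is 3 */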
2783 * We need to process new refs before deleted refs, but compare_tree gives us
2784 * everything mixed. So we first record all refs and later process them.
2785 * This function is a helper to record one ref.
2787 static int __record_ref(struct list_head *head, u64 dir,
2788 u64 dir_gen, struct fs_path *path)
2790 struct recorded_ref *ref;
2792 ref = kmalloc(sizeof(*ref), GFP_KERNEL);
2797 ref->dir_gen = dir_gen;
2798 set_ref_path(ref, path);
2799 list_add_tail(&ref->list, head);
2803 static int dup_ref(struct recorded_ref *ref, struct list_head *list)
2805 struct recorded_ref *new;
2807 new = kmalloc(sizeof(*ref), GFP_KERNEL);
2811 new->dir = ref->dir;
2812 new->dir_gen = ref->dir_gen;
2813 new->full_path = NULL;
2814 INIT_LIST_HEAD(&new->list);
2815 list_add_tail(&new->list, list);
2819 static void __free_recorded_refs(struct list_head *head)
2821 struct recorded_ref *cur;
2823 while (!list_empty(head)) {
2824 cur = list_entry(head->next, struct recorded_ref, list);
2825 fs_path_free(cur->full_path);
2826 list_del(&cur->list);
2831 static void free_recorded_refs(struct send_ctx *sctx)
2833 __free_recorded_refs(&sctx->new_refs);
2834 __free_recorded_refs(&sctx->deleted_refs);
2838 * Renames/moves a file/dir to its orphan name. Used when the first
2839 * ref of an unprocessed inode gets overwritten and for all non-empty directories.
2842 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
2843 struct fs_path *path)
2846 struct fs_path *orphan;
2848 orphan = fs_path_alloc();
2852 ret = gen_unique_name(sctx, ino, gen, orphan);
2856 ret = send_rename(sctx, path, orphan);
2859 fs_path_free(orphan);
2863 static struct orphan_dir_info *
2864 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2866 struct rb_node **p = &sctx->orphan_dirs.rb_node;
2867 struct rb_node *parent = NULL;
2868 struct orphan_dir_info *entry, *odi;
2870 odi = kmalloc(sizeof(*odi), GFP_KERNEL);
2872 return ERR_PTR(-ENOMEM);
2878 entry = rb_entry(parent, struct orphan_dir_info, node);
2879 if (dir_ino < entry->ino) {
2881 } else if (dir_ino > entry->ino) {
2882 p = &(*p)->rb_right;
2889 rb_link_node(&odi->node, parent, p);
2890 rb_insert_color(&odi->node, &sctx->orphan_dirs);
2894 static struct orphan_dir_info *
2895 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
2897 struct rb_node *n = sctx->orphan_dirs.rb_node;
2898 struct orphan_dir_info *entry;
2901 entry = rb_entry(n, struct orphan_dir_info, node);
2902 if (dir_ino < entry->ino)
2904 else if (dir_ino > entry->ino)
2912 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
2914 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
2919 static void free_orphan_dir_info(struct send_ctx *sctx,
2920 struct orphan_dir_info *odi)
2924 rb_erase(&odi->node, &sctx->orphan_dirs);
2929 * Returns 1 if a directory can be removed at this point in time.
2930 * We check this by iterating all dir items and checking if the inode behind
2931 * the dir item was already processed.
2933 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
2937 struct btrfs_root *root = sctx->parent_root;
2938 struct btrfs_path *path;
2939 struct btrfs_key key;
2940 struct btrfs_key found_key;
2941 struct btrfs_key loc;
2942 struct btrfs_dir_item *di;
2945 * Don't try to rmdir the top/root subvolume dir.
2947 if (dir == BTRFS_FIRST_FREE_OBJECTID)
2950 path = alloc_path_for_send();
2955 key.type = BTRFS_DIR_INDEX_KEY;
2957 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2962 struct waiting_dir_move *dm;
2964 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
2965 ret = btrfs_next_leaf(root, path);
2972 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2974 if (found_key.objectid != key.objectid ||
2975 found_key.type != key.type)
2978 di = btrfs_item_ptr(path->nodes[0], path->slots[0],
2979 struct btrfs_dir_item);
2980 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);
2982 dm = get_waiting_dir_move(sctx, loc.objectid);
2984 struct orphan_dir_info *odi;
2986 odi = add_orphan_dir_info(sctx, dir);
2992 dm->rmdir_ino = dir;
2997 if (loc.objectid > send_progress) {
2998 struct orphan_dir_info *odi;
3000 odi = get_orphan_dir_info(sctx, dir);
3001 free_orphan_dir_info(sctx, odi);
3012 btrfs_free_path(path);
3016 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
3018 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);
3020 return entry != NULL;
3023 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
3025 struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
3026 struct rb_node *parent = NULL;
3027 struct waiting_dir_move *entry, *dm;
3029 dm = kmalloc(sizeof(*dm), GFP_KERNEL);
3034 dm->orphanized = orphanized;
3038 entry = rb_entry(parent, struct waiting_dir_move, node);
3039 if (ino < entry->ino) {
3041 } else if (ino > entry->ino) {
3042 p = &(*p)->rb_right;
3049 rb_link_node(&dm->node, parent, p);
3050 rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
3054 static struct waiting_dir_move *
3055 get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
3057 struct rb_node *n = sctx->waiting_dir_moves.rb_node;
3058 struct waiting_dir_move *entry;
3061 entry = rb_entry(n, struct waiting_dir_move, node);
3062 if (ino < entry->ino)
3064 else if (ino > entry->ino)
3072 static void free_waiting_dir_move(struct send_ctx *sctx,
3073 struct waiting_dir_move *dm)
3077 rb_erase(&dm->node, &sctx->waiting_dir_moves);
3081 static int add_pending_dir_move(struct send_ctx *sctx,
3085 struct list_head *new_refs,
3086 struct list_head *deleted_refs,
3087 const bool is_orphan)
3089 struct rb_node **p = &sctx->pending_dir_moves.rb_node;
3090 struct rb_node *parent = NULL;
3091 struct pending_dir_move *entry = NULL, *pm;
3092 struct recorded_ref *cur;
3096 pm = kmalloc(sizeof(*pm), GFP_KERNEL);
3099 pm->parent_ino = parent_ino;
3102 INIT_LIST_HEAD(&pm->list);
3103 INIT_LIST_HEAD(&pm->update_refs);
3104 RB_CLEAR_NODE(&pm->node);
3108 entry = rb_entry(parent, struct pending_dir_move, node);
3109 if (parent_ino < entry->parent_ino) {
3111 } else if (parent_ino > entry->parent_ino) {
3112 p = &(*p)->rb_right;
3119 list_for_each_entry(cur, deleted_refs, list) {
3120 ret = dup_ref(cur, &pm->update_refs);
3124 list_for_each_entry(cur, new_refs, list) {
3125 ret = dup_ref(cur, &pm->update_refs);
3130 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
3135 list_add_tail(&pm->list, &entry->list);
3137 rb_link_node(&pm->node, parent, p);
3138 rb_insert_color(&pm->node, &sctx->pending_dir_moves);
3143 __free_recorded_refs(&pm->update_refs);
3149 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
3152 struct rb_node *n = sctx->pending_dir_moves.rb_node;
3153 struct pending_dir_move *entry;
3156 entry = rb_entry(n, struct pending_dir_move, node);
3157 if (parent_ino < entry->parent_ino)
3159 else if (parent_ino > entry->parent_ino)
3167 static int path_loop(struct send_ctx *sctx, struct fs_path *name,
3168 u64 ino, u64 gen, u64 *ancestor_ino)
3171 u64 parent_inode = 0;
3173 u64 start_ino = ino;
3176 while (ino != BTRFS_FIRST_FREE_OBJECTID) {
3177 fs_path_reset(name);
3179 if (is_waiting_for_rm(sctx, ino))
3181 if (is_waiting_for_move(sctx, ino)) {
3182 if (*ancestor_ino == 0)
3183 *ancestor_ino = ino;
3184 ret = get_first_ref(sctx->parent_root, ino,
3185 &parent_inode, &parent_gen, name);
3187 ret = __get_cur_name_and_parent(sctx, ino, gen,
3197 if (parent_inode == start_ino) {
3199 if (*ancestor_ino == 0)
3200 *ancestor_ino = ino;
3209 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
3211 struct fs_path *from_path = NULL;
3212 struct fs_path *to_path = NULL;
3213 struct fs_path *name = NULL;
3214 u64 orig_progress = sctx->send_progress;
3215 struct recorded_ref *cur;
3216 u64 parent_ino, parent_gen;
3217 struct waiting_dir_move *dm = NULL;
3223 name = fs_path_alloc();
3224 from_path = fs_path_alloc();
3225 if (!name || !from_path) {
3230 dm = get_waiting_dir_move(sctx, pm->ino);
3232 rmdir_ino = dm->rmdir_ino;
3233 is_orphan = dm->orphanized;
3234 free_waiting_dir_move(sctx, dm);
3237 ret = gen_unique_name(sctx, pm->ino,
3238 pm->gen, from_path);
3240 ret = get_first_ref(sctx->parent_root, pm->ino,
3241 &parent_ino, &parent_gen, name);
3244 ret = get_cur_path(sctx, parent_ino, parent_gen,
3248 ret = fs_path_add_path(from_path, name);
3253 sctx->send_progress = sctx->cur_ino + 1;
3254 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
3258 LIST_HEAD(deleted_refs);
3259 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
3260 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
3261 &pm->update_refs, &deleted_refs,
3266 dm = get_waiting_dir_move(sctx, pm->ino);
3268 dm->rmdir_ino = rmdir_ino;
3272 fs_path_reset(name);
3275 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
3279 ret = send_rename(sctx, from_path, to_path);
3284 struct orphan_dir_info *odi;
3286 odi = get_orphan_dir_info(sctx, rmdir_ino);
3288 /* already deleted */
3291 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
3297 name = fs_path_alloc();
3302 ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
3305 ret = send_rmdir(sctx, name);
3308 free_orphan_dir_info(sctx, odi);
3312 ret = send_utimes(sctx, pm->ino, pm->gen);
3317 * After rename/move, need to update the utimes of both new parent(s)
3318 * and old parent(s).
3320 list_for_each_entry(cur, &pm->update_refs, list) {
3322 * The parent inode might have been deleted in the send snapshot
3324 ret = get_inode_info(sctx->send_root, cur->dir, NULL,
3325 NULL, NULL, NULL, NULL, NULL);
3326 if (ret == -ENOENT) {
3333 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
3340 fs_path_free(from_path);
3341 fs_path_free(to_path);
3342 sctx->send_progress = orig_progress;
3347 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
3349 if (!list_empty(&m->list))
3351 if (!RB_EMPTY_NODE(&m->node))
3352 rb_erase(&m->node, &sctx->pending_dir_moves);
3353 __free_recorded_refs(&m->update_refs);
3357 static void tail_append_pending_moves(struct pending_dir_move *moves,
3358 struct list_head *stack)
3360 if (list_empty(&moves->list)) {
3361 list_add_tail(&moves->list, stack);
3364 list_splice_init(&moves->list, &list);
3365 list_add_tail(&moves->list, stack);
3366 list_splice_tail(&list, stack);
3370 static int apply_children_dir_moves(struct send_ctx *sctx)
3372 struct pending_dir_move *pm;
3373 struct list_head stack;
3374 u64 parent_ino = sctx->cur_ino;
3377 pm = get_pending_dir_moves(sctx, parent_ino);
3381 INIT_LIST_HEAD(&stack);
3382 tail_append_pending_moves(pm, &stack);
3384 while (!list_empty(&stack)) {
3385 pm = list_first_entry(&stack, struct pending_dir_move, list);
3386 parent_ino = pm->ino;
3387 ret = apply_dir_move(sctx, pm);
3388 free_pending_move(sctx, pm);
3391 pm = get_pending_dir_moves(sctx, parent_ino);
3393 tail_append_pending_moves(pm, &stack);
3398 while (!list_empty(&stack)) {
3399 pm = list_first_entry(&stack, struct pending_dir_move, list);
3400 free_pending_move(sctx, pm);
3406 * We might need to delay a directory rename even when no ancestor directory
3407 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
3408 * renamed. This happens when we rename a directory to the old name (the name
3409 * in the parent root) of some other unrelated directory that got its rename
3410 * delayed due to some ancestor with a higher inode number that got renamed.
 *
 * Tree state in the parent snapshot:
 *
3416 * |---- a/ (ino 257)
3417 * |     |---- file (ino 260)
3419 * |---- b/ (ino 258)
3420 * |---- c/ (ino 259)
 *
 * Tree state in the send snapshot:
 *
3424 * |---- a/ (ino 258)
3425 * |---- x/ (ino 259)
3426 *       |---- y/ (ino 257)
3427 *             |----- file (ino 260)
3429 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
3430 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
3431 * inode 259 from 'c' to 'x'. So the order of the rename commands in the send stream must be:
3434 * 1 - rename 259 from 'c' to 'x'
3435 * 2 - rename 257 from 'a' to 'x/y'
3436 * 3 - rename 258 from 'b' to 'a'
3438 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
3439 * be done right away and < 0 on error.
3441 static int wait_for_dest_dir_move(struct send_ctx *sctx,
3442 struct recorded_ref *parent_ref,
3443 const bool is_orphan)
3445 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info;
3446 struct btrfs_path *path;
3447 struct btrfs_key key;
3448 struct btrfs_key di_key;
3449 struct btrfs_dir_item *di;
3453 struct waiting_dir_move *wdm;
3455 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
3458 path = alloc_path_for_send();
3462 key.objectid = parent_ref->dir;
3463 key.type = BTRFS_DIR_ITEM_KEY;
3464 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
3466 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
3469 } else if (ret > 0) {
3474 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name,
3475 parent_ref->name_len);
3481 * di_key.objectid has the number of the inode that has a dentry in the
3482 * parent directory with the same name that sctx->cur_ino is being
3483 * renamed to. We need to check if that inode is in the send root as
3484 * well and if it is currently marked as an inode with a pending rename.
3485 * If it is, we need to delay the rename of sctx->cur_ino as well, so
3486 * that it happens after that other inode is renamed.
3488 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
3489 if (di_key.type != BTRFS_INODE_ITEM_KEY) {
3494 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
3495 &left_gen, NULL, NULL, NULL, NULL);
3498 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
3499 &right_gen, NULL, NULL, NULL, NULL);
3506 /* Different inode, no need to delay the rename of sctx->cur_ino */
3507 if (right_gen != left_gen) {
3512 wdm = get_waiting_dir_move(sctx, di_key.objectid);
3513 if (wdm && !wdm->orphanized) {
3514 ret = add_pending_dir_move(sctx,
3516 sctx->cur_inode_gen,
3519 &sctx->deleted_refs,
3525 btrfs_free_path(path);
3530 * Check if inode ino1 is an ancestor of inode ino2 in the given root.
3531 * Return 1 if true, 0 if false and < 0 on error.
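 *
 * For example (hypothetical layout): if the first refs in @root form the
 * chain 260 -> 259 -> 257, then is_ancestor(root, 257, gen_of_257, 260, NULL)
 * returns 1, while is_ancestor(root, 260, gen_of_260, 257, NULL) returns 0.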
3533 static int is_ancestor(struct btrfs_root *root,
3537 struct fs_path *fs_path)
3540 bool free_path = false;
3544 fs_path = fs_path_alloc();
3550 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3554 fs_path_reset(fs_path);
3555 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3557 if (ret == -ENOENT && ino == ino2)
3561 if (parent == ino1) {
3562 ret = parent_gen == ino1_gen ? 1 : 0;
3569 fs_path_free(fs_path);
3573 static int wait_for_parent_move(struct send_ctx *sctx,
3574 struct recorded_ref *parent_ref,
3575 const bool is_orphan)
3578 u64 ino = parent_ref->dir;
3579 u64 ino_gen = parent_ref->dir_gen;
3580 u64 parent_ino_before, parent_ino_after;
3581 struct fs_path *path_before = NULL;
3582 struct fs_path *path_after = NULL;
3585 path_after = fs_path_alloc();
3586 path_before = fs_path_alloc();
3587 if (!path_after || !path_before) {
3593 * Our current directory inode may not yet be renamed/moved because some
3594 * ancestor (immediate or not) has to be renamed/moved first. So check if
3595 * such an ancestor exists and make sure our own rename/move happens after
3596 * that ancestor is processed to avoid path build infinite loops (done
3597 * at get_cur_path()).
3599 while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3600 u64 parent_ino_after_gen;
3602 if (is_waiting_for_move(sctx, ino)) {
3604 * If the current inode is an ancestor of ino in the
3605 * parent root, we need to delay the rename of the
3606 * current inode, otherwise don't delay the rename
3607 * because we can end up with a circular dependency
3608 * of renames, resulting in some directories never
3609 * getting the respective rename operations issued in
3610 * the send stream or getting into infinite path build loops.
3613 ret = is_ancestor(sctx->parent_root,
3614 sctx->cur_ino, sctx->cur_inode_gen,
3620 fs_path_reset(path_before);
3621 fs_path_reset(path_after);
3623 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
3624 &parent_ino_after_gen, path_after);
3627 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
3629 if (ret < 0 && ret != -ENOENT) {
3631 } else if (ret == -ENOENT) {
3636 len1 = fs_path_len(path_before);
3637 len2 = fs_path_len(path_after);
3638 if (ino > sctx->cur_ino &&
3639 (parent_ino_before != parent_ino_after || len1 != len2 ||
3640 memcmp(path_before->start, path_after->start, len1))) {
3643 ret = get_inode_info(sctx->parent_root, ino, NULL,
3644 &parent_ino_gen, NULL, NULL, NULL,
3648 if (ino_gen == parent_ino_gen) {
3653 ino = parent_ino_after;
3654 ino_gen = parent_ino_after_gen;
3658 fs_path_free(path_before);
3659 fs_path_free(path_after);
3662 ret = add_pending_dir_move(sctx,
3664 sctx->cur_inode_gen,
3667 &sctx->deleted_refs,
3676 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref)
3679 struct fs_path *new_path;
3682 * Our reference's name member points to its full_path member string, so
3683 * we use a new path here.
3685 new_path = fs_path_alloc();
3689 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path);
3691 fs_path_free(new_path);
3694 ret = fs_path_add(new_path, ref->name, ref->name_len);
3696 fs_path_free(new_path);
3700 fs_path_free(ref->full_path);
3701 set_ref_path(ref, new_path);
3707 * This does all the move/link/unlink/rmdir magic.
3709 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
3711 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
3713 struct recorded_ref *cur;
3714 struct recorded_ref *cur2;
3715 struct list_head check_dirs;
3716 struct fs_path *valid_path = NULL;
3720 int did_overwrite = 0;
3722 u64 last_dir_ino_rm = 0;
3723 bool can_rename = true;
3724 bool orphanized_dir = false;
3725 bool orphanized_ancestor = false;
3727 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino);
3730 * This should never happen as the root dir always has the same ref
3731 * which is always '..'
3733 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
3734 INIT_LIST_HEAD(&check_dirs);
3736 valid_path = fs_path_alloc();
3743 * First, check if the first ref of the current inode was overwritten
3744 * before. If yes, we know that the current inode was already orphanized
3745 * and thus use the orphan name. If not, we can use get_cur_path to
3746 * get the path of the first ref as it would look like while receiving at
3747 * this point in time.
3748 * New inodes are always orphan at the beginning, so force to use the
3749 * orphan name in this case.
3750 * The first ref is stored in valid_path and will be updated if it
3751 * gets moved around.
3753 if (!sctx->cur_inode_new) {
3754 ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
3755 sctx->cur_inode_gen);
3761 if (sctx->cur_inode_new || did_overwrite) {
3762 ret = gen_unique_name(sctx, sctx->cur_ino,
3763 sctx->cur_inode_gen, valid_path);
3768 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3774 list_for_each_entry(cur, &sctx->new_refs, list) {
3776 * We may have refs where the parent directory does not exist
3777 * yet. This happens if the parent directory's inum is higher
3778 * than the current inum. To handle this case, we create the
3779 * parent directory out of order. But we need to check if this
3780 * already happened before due to other refs in the same dir.
3782 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
3785 if (ret == inode_state_will_create) {
3788 * First check if any of the current inode's refs already
3789 * created the dir.
3791 list_for_each_entry(cur2, &sctx->new_refs, list) {
3794 if (cur2->dir == cur->dir) {
3801 * If that did not happen, check if a previous inode
3802 * already created the dir.
3805 ret = did_create_dir(sctx, cur->dir);
3809 ret = send_create_inode(sctx, cur->dir);
3816 * Check if this new ref would overwrite the first ref of
3817 * another unprocessed inode. If yes, orphanize the
3818 * overwritten inode. If we find an overwritten ref that is
3819 * not the first ref, simply unlink it.
3821 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
3822 cur->name, cur->name_len,
3823 &ow_inode, &ow_gen, &ow_mode);
3827 ret = is_first_ref(sctx->parent_root,
3828 ow_inode, cur->dir, cur->name,
3833 struct name_cache_entry *nce;
3834 struct waiting_dir_move *wdm;
3836 ret = orphanize_inode(sctx, ow_inode, ow_gen,
3840 if (S_ISDIR(ow_mode))
3841 orphanized_dir = true;
3844 * If ow_inode has its rename operation delayed,
3845 * make sure that its orphanized name is used in
3846 * the source path when performing its rename operation.
3849 if (is_waiting_for_move(sctx, ow_inode)) {
3850 wdm = get_waiting_dir_move(sctx,
3853 wdm->orphanized = true;
3857 * Make sure we clear our orphanized inode's
3858 * name from the name cache. This is because the
3859 * inode ow_inode might be an ancestor of some
3860 * other inode that will be orphanized as well
3861 * later and has an inode number greater than
3862 * sctx->send_progress. We need to prevent
3863 * future name lookups from using the old name
3864 * and instead get the orphan name.
3866 nce = name_cache_search(sctx, ow_inode, ow_gen);
3868 name_cache_delete(sctx, nce);
3873 * ow_inode might currently be an ancestor of
3874 * cur_ino, therefore compute valid_path (the
3875 * current path of cur_ino) again because it
3876 * might contain the pre-orphanization name of
3877 * ow_inode, which is no longer valid.
3879 ret = is_ancestor(sctx->parent_root,
3881 sctx->cur_ino, NULL);
3883 orphanized_ancestor = true;
3884 fs_path_reset(valid_path);
3885 ret = get_cur_path(sctx, sctx->cur_ino,
3886 sctx->cur_inode_gen,
3892 ret = send_unlink(sctx, cur->full_path);
3898 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
3899 ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
3908 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
3910 ret = wait_for_parent_move(sctx, cur, is_orphan);
3920 * link/move the ref to the new place. If we have an orphan
3921 * inode, move it and update valid_path. If not, link or move
3922 * it depending on the inode mode.
3924 if (is_orphan && can_rename) {
3925 ret = send_rename(sctx, valid_path, cur->full_path);
3929 ret = fs_path_copy(valid_path, cur->full_path);
3932 } else if (can_rename) {
3933 if (S_ISDIR(sctx->cur_inode_mode)) {
3935 * Dirs can't be linked, so move it. For moved
3936 * dirs, we always have one new and one deleted
3937 * ref. The deleted ref is ignored later.
3939 ret = send_rename(sctx, valid_path,
3942 ret = fs_path_copy(valid_path,
3948 * We might have previously orphanized an inode
3949 * which is an ancestor of our current inode,
3950 * so our reference's full path, which was
3951 * computed before any such orphanizations, must be updated.
3954 if (orphanized_dir) {
3955 ret = update_ref_path(sctx, cur);
3959 ret = send_link(sctx, cur->full_path,
3965 ret = dup_ref(cur, &check_dirs);
3970 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
3972 * Check if we can already rmdir the directory. If not,
3973 * orphanize it. For every dir item inside that gets deleted
3974 * later, we do this check again and rmdir it then if possible.
3975 * See the use of check_dirs for more details.
3977 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
3982 ret = send_rmdir(sctx, valid_path);
3985 } else if (!is_orphan) {
3986 ret = orphanize_inode(sctx, sctx->cur_ino,
3987 sctx->cur_inode_gen, valid_path);
3993 list_for_each_entry(cur, &sctx->deleted_refs, list) {
3994 ret = dup_ref(cur, &check_dirs);
3998 } else if (S_ISDIR(sctx->cur_inode_mode) &&
3999 !list_empty(&sctx->deleted_refs)) {
4001 * We have a moved dir. Add the old parent to check_dirs
4003 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
4005 ret = dup_ref(cur, &check_dirs);
4008 } else if (!S_ISDIR(sctx->cur_inode_mode)) {
4010 * We have a non-dir inode. Go through all deleted refs and
4011 * unlink them if they were not already overwritten by other inodes.
4014 list_for_each_entry(cur, &sctx->deleted_refs, list) {
4015 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
4016 sctx->cur_ino, sctx->cur_inode_gen,
4017 cur->name, cur->name_len);
4022 * If we orphanized any ancestor before, we need
4023 * to recompute the full path for deleted names,
4024 * since any such path was computed before we
4025 * processed any references and orphanized any ancestor inode.
4028 if (orphanized_ancestor) {
4029 ret = update_ref_path(sctx, cur);
4033 ret = send_unlink(sctx, cur->full_path);
4037 ret = dup_ref(cur, &check_dirs);
4042 * If the inode is still orphan, unlink the orphan. This may
4043 * happen when a previous inode overwrote the first ref
4044 * of this inode and no new refs were added for the current
4045 * inode. Unlinking does not mean that the inode is deleted in
4046 * all cases. There may still be links to this inode in other places.
4050 ret = send_unlink(sctx, valid_path);
4057 * We collected all parent dirs where cur_inode was once located. We
4058 * now go through all these dirs and check if they are pending for
4059 * deletion and if it's finally possible to perform the rmdir now.
4060 * We also update the inode stats of the parent dirs here.
4062 list_for_each_entry(cur, &check_dirs, list) {
4064 * In case we had refs into dirs that were not processed yet,
4065 * we don't need to do the utime and rmdir logic for these dirs.
4066 * The dir will be processed later.
4068 if (cur->dir > sctx->cur_ino)
4071 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
4075 if (ret == inode_state_did_create ||
4076 ret == inode_state_no_change) {
4077 /* TODO delayed utimes */
4078 ret = send_utimes(sctx, cur->dir, cur->dir_gen);
4081 } else if (ret == inode_state_did_delete &&
4082 cur->dir != last_dir_ino_rm) {
4083 ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
4088 ret = get_cur_path(sctx, cur->dir,
4089 cur->dir_gen, valid_path);
4092 ret = send_rmdir(sctx, valid_path);
4095 last_dir_ino_rm = cur->dir;
4103 __free_recorded_refs(&check_dirs);
4104 free_recorded_refs(sctx);
4105 fs_path_free(valid_path);
4109 static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name,
4110 void *ctx, struct list_head *refs)
4113 struct send_ctx *sctx = ctx;
4117 p = fs_path_alloc();
4121 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
4126 ret = get_cur_path(sctx, dir, gen, p);
4129 ret = fs_path_add_path(p, name);
4133 ret = __record_ref(refs, dir, gen, p);
4141 static int __record_new_ref(int num, u64 dir, int index,
4142 struct fs_path *name,
4145 struct send_ctx *sctx = ctx;
4146 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs);
4150 static int __record_deleted_ref(int num, u64 dir, int index,
4151 struct fs_path *name,
4154 struct send_ctx *sctx = ctx;
4155 return record_ref(sctx->parent_root, dir, name, ctx,
4156 &sctx->deleted_refs);
4159 static int record_new_ref(struct send_ctx *sctx)
4163 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4164 sctx->cmp_key, 0, __record_new_ref, sctx);
4173 static int record_deleted_ref(struct send_ctx *sctx)
4177 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4178 sctx->cmp_key, 0, __record_deleted_ref, sctx);
4187 struct find_ref_ctx {
4190 struct btrfs_root *root;
4191 struct fs_path *name;
4195 static int __find_iref(int num, u64 dir, int index,
4196 struct fs_path *name,
4199 struct find_ref_ctx *ctx = ctx_;
4203 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
4204 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
4206 * To avoid doing extra lookups we'll only do this if everything else matches.
4209 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
4213 if (dir_gen != ctx->dir_gen)
4215 ctx->found_idx = num;
4221 static int find_iref(struct btrfs_root *root,
4222 struct btrfs_path *path,
4223 struct btrfs_key *key,
4224 u64 dir, u64 dir_gen, struct fs_path *name)
4227 struct find_ref_ctx ctx;
4231 ctx.dir_gen = dir_gen;
4235 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
4239 if (ctx.found_idx == -1)
4242 return ctx.found_idx;
4245 static int __record_changed_new_ref(int num, u64 dir, int index,
4246 struct fs_path *name,
4251 struct send_ctx *sctx = ctx;
4253 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
4258 ret = find_iref(sctx->parent_root, sctx->right_path,
4259 sctx->cmp_key, dir, dir_gen, name);
4261 ret = __record_new_ref(num, dir, index, name, sctx);
4268 static int __record_changed_deleted_ref(int num, u64 dir, int index,
4269 struct fs_path *name,
4274 struct send_ctx *sctx = ctx;
4276 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
4281 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
4282 dir, dir_gen, name);
4284 ret = __record_deleted_ref(num, dir, index, name, sctx);
4291 static int record_changed_ref(struct send_ctx *sctx)
4295 ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
4296 sctx->cmp_key, 0, __record_changed_new_ref, sctx);
4299 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
4300 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
4310 * Record and process all refs at once. Needed when an inode changes the
4311 * generation number, which means that it was deleted and recreated.
4313 static int process_all_refs(struct send_ctx *sctx,
4314 enum btrfs_compare_tree_result cmd)
4317 struct btrfs_root *root;
4318 struct btrfs_path *path;
4319 struct btrfs_key key;
4320 struct btrfs_key found_key;
4321 struct extent_buffer *eb;
4323 iterate_inode_ref_t cb;
4324 int pending_move = 0;
4326 path = alloc_path_for_send();
4330 if (cmd == BTRFS_COMPARE_TREE_NEW) {
4331 root = sctx->send_root;
4332 cb = __record_new_ref;
4333 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
4334 root = sctx->parent_root;
4335 cb = __record_deleted_ref;
4337 btrfs_err(sctx->send_root->fs_info,
4338 "Wrong command %d in process_all_refs", cmd);
4343 key.objectid = sctx->cmp_key->objectid;
4344 key.type = BTRFS_INODE_REF_KEY;
4346 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4351 eb = path->nodes[0];
4352 slot = path->slots[0];
4353 if (slot >= btrfs_header_nritems(eb)) {
4354 ret = btrfs_next_leaf(root, path);
4362 btrfs_item_key_to_cpu(eb, &found_key, slot);
4364 if (found_key.objectid != key.objectid ||
4365 (found_key.type != BTRFS_INODE_REF_KEY &&
4366 found_key.type != BTRFS_INODE_EXTREF_KEY))
4369 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
4375 btrfs_release_path(path);
4378 * We don't actually care about pending_move as we are simply
4379 * re-creating this inode and will be renaming it into place once we
4380 * rename the parent directory.
4382 ret = process_recorded_refs(sctx, &pending_move);
4384 btrfs_free_path(path);
4388 static int send_set_xattr(struct send_ctx *sctx,
4389 struct fs_path *path,
4390 const char *name, int name_len,
4391 const char *data, int data_len)
4395 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
4399 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4400 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4401 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);
4403 ret = send_cmd(sctx);
4410 static int send_remove_xattr(struct send_ctx *sctx,
4411 struct fs_path *path,
4412 const char *name, int name_len)
4416 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
4420 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
4421 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
4423 ret = send_cmd(sctx);
4430 static int __process_new_xattr(int num, struct btrfs_key *di_key,
4431 const char *name, int name_len,
4432 const char *data, int data_len,
4436 struct send_ctx *sctx = ctx;
4438 struct posix_acl_xattr_header dummy_acl;
4440 p = fs_path_alloc();
4445 * This hack is needed because empty acls are stored as zero byte
4446 * data in xattrs. The problem with that is that receiving these zero byte
4447 * acls will fail later. To fix this, we send a dummy acl list that
4448 * only contains the version number and no entries.
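 *
 * A sketch of what is sent instead of the empty value (assuming the current
 * POSIX_ACL_XATTR_VERSION of 2): just the bare header, i.e.
 *
 *   struct posix_acl_xattr_header hdr = { .a_version = cpu_to_le32(2) };
 *
 * with data_len == sizeof(hdr) and no posix_acl_xattr_entry records after it.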
4450 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
4451 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
4452 if (data_len == 0) {
4453 dummy_acl.a_version =
4454 cpu_to_le32(POSIX_ACL_XATTR_VERSION);
4455 data = (char *)&dummy_acl;
4456 data_len = sizeof(dummy_acl);
4460 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4464 ret = send_set_xattr(sctx, p, name, name_len, data, data_len);
4471 static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
4472 const char *name, int name_len,
4473 const char *data, int data_len,
4477 struct send_ctx *sctx = ctx;
4480 p = fs_path_alloc();
4484 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4488 ret = send_remove_xattr(sctx, p, name, name_len);
4495 static int process_new_xattr(struct send_ctx *sctx)
4499 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4500 __process_new_xattr, sctx);
4505 static int process_deleted_xattr(struct send_ctx *sctx)
4507 return iterate_dir_item(sctx->parent_root, sctx->right_path,
4508 __process_deleted_xattr, sctx);
4511 struct find_xattr_ctx {
4519 static int __find_xattr(int num, struct btrfs_key *di_key,
4520 const char *name, int name_len,
4521 const char *data, int data_len,
4522 u8 type, void *vctx)
4524 struct find_xattr_ctx *ctx = vctx;
4526 if (name_len == ctx->name_len &&
4527 strncmp(name, ctx->name, name_len) == 0) {
4528 ctx->found_idx = num;
4529 ctx->found_data_len = data_len;
4530 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
4531 if (!ctx->found_data)
4538 static int find_xattr(struct btrfs_root *root,
4539 struct btrfs_path *path,
4540 struct btrfs_key *key,
4541 const char *name, int name_len,
4542 char **data, int *data_len)
4545 struct find_xattr_ctx ctx;
4548 ctx.name_len = name_len;
4550 ctx.found_data = NULL;
4551 ctx.found_data_len = 0;
4553 ret = iterate_dir_item(root, path, __find_xattr, &ctx);
4557 if (ctx.found_idx == -1)
4560 *data = ctx.found_data;
4561 *data_len = ctx.found_data_len;
4563 kfree(ctx.found_data);
4565 return ctx.found_idx;
4569 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
4570 const char *name, int name_len,
4571 const char *data, int data_len,
4575 struct send_ctx *sctx = ctx;
4576 char *found_data = NULL;
4577 int found_data_len = 0;
4579 ret = find_xattr(sctx->parent_root, sctx->right_path,
4580 sctx->cmp_key, name, name_len, &found_data,
4582 if (ret == -ENOENT) {
4583 ret = __process_new_xattr(num, di_key, name, name_len, data,
4584 data_len, type, ctx);
4585 } else if (ret >= 0) {
4586 if (data_len != found_data_len ||
4587 memcmp(data, found_data, data_len)) {
4588 ret = __process_new_xattr(num, di_key, name, name_len,
4589 data, data_len, type, ctx);
4599 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
4600 const char *name, int name_len,
4601 const char *data, int data_len,
4605 struct send_ctx *sctx = ctx;
4607 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
4608 name, name_len, NULL, NULL);
4610 ret = __process_deleted_xattr(num, di_key, name, name_len, data,
4611 data_len, type, ctx);
4618 static int process_changed_xattr(struct send_ctx *sctx)
4622 ret = iterate_dir_item(sctx->send_root, sctx->left_path,
4623 __process_changed_new_xattr, sctx);
4626 ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
4627 __process_changed_deleted_xattr, sctx);
4633 static int process_all_new_xattrs(struct send_ctx *sctx)
4636 struct btrfs_root *root;
4637 struct btrfs_path *path;
4638 struct btrfs_key key;
4639 struct btrfs_key found_key;
4640 struct extent_buffer *eb;
4643 path = alloc_path_for_send();
4647 root = sctx->send_root;
4649 key.objectid = sctx->cmp_key->objectid;
4650 key.type = BTRFS_XATTR_ITEM_KEY;
4652 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4657 eb = path->nodes[0];
4658 slot = path->slots[0];
4659 if (slot >= btrfs_header_nritems(eb)) {
4660 ret = btrfs_next_leaf(root, path);
4663 } else if (ret > 0) {
4670 btrfs_item_key_to_cpu(eb, &found_key, slot);
4671 if (found_key.objectid != key.objectid ||
4672 found_key.type != key.type) {
4677 ret = iterate_dir_item(root, path, __process_new_xattr, sctx);
4685 btrfs_free_path(path);
4689 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
4691 struct btrfs_root *root = sctx->send_root;
4692 struct btrfs_fs_info *fs_info = root->fs_info;
4693 struct inode *inode;
4696 struct btrfs_key key;
4697 pgoff_t index = offset >> PAGE_SHIFT;
4699 unsigned pg_offset = offset & ~PAGE_MASK;
4702 key.objectid = sctx->cur_ino;
4703 key.type = BTRFS_INODE_ITEM_KEY;
4706 inode = btrfs_iget(fs_info->sb, &key, root, NULL);
4708 return PTR_ERR(inode);
4710 if (offset + len > i_size_read(inode)) {
4711 if (offset > i_size_read(inode))
4714 len = i_size_read(inode) - offset; /* clamp the read to i_size */
4719 last_index = (offset + len - 1) >> PAGE_SHIFT;
4721 /* initial readahead */
4722 memset(&sctx->ra, 0, sizeof(struct file_ra_state));
4723 file_ra_state_init(&sctx->ra, inode->i_mapping);
4725 while (index <= last_index) {
4726 unsigned cur_len = min_t(unsigned, len,
4727 PAGE_SIZE - pg_offset);
4729 page = find_lock_page(inode->i_mapping, index);
4731 page_cache_sync_readahead(inode->i_mapping, &sctx->ra,
4732 NULL, index, last_index + 1 - index);
4734 page = find_or_create_page(inode->i_mapping, index,
4742 if (PageReadahead(page)) {
4743 page_cache_async_readahead(inode->i_mapping, &sctx->ra,
4744 NULL, page, index, last_index + 1 - index);
4747 if (!PageUptodate(page)) {
4748 btrfs_readpage(NULL, page);
4750 if (!PageUptodate(page)) {
4759 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
4774 * Read some bytes from the current inode/file and send a write command to user space.
4777 static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
4779 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
4782 ssize_t num_read = 0;
4784 p = fs_path_alloc();
4788 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len);
4790 num_read = fill_read_buf(sctx, offset, len);
4791 if (num_read <= 0) {
4797 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4801 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4805 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4806 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4807 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);
4809 ret = send_cmd(sctx);
4820 * Send a clone command to user space.
4822 static int send_clone(struct send_ctx *sctx,
4823 u64 offset, u32 len,
4824 struct clone_root *clone_root)
4830 btrfs_debug(sctx->send_root->fs_info,
4831 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu",
4832 offset, len, clone_root->root->objectid, clone_root->ino,
4833 clone_root->offset);
4835 p = fs_path_alloc();
4839 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
4843 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4847 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4848 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
4849 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4851 if (clone_root->root == sctx->send_root) {
4852 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
4853 &gen, NULL, NULL, NULL, NULL);
4856 ret = get_cur_path(sctx, clone_root->ino, gen, p);
4858 ret = get_inode_path(clone_root->root, clone_root->ino, p);
4864 * If the parent we're using has a received_uuid set then use that as
4865 * our clone source as that is what we will look for when doing a receive.
4868 * This covers the case that we create a snapshot off of a received
4869 * subvolume and then use that as the parent and try to receive on a different host.
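 *
 * In other words (hedged example): if the clone source's root item carries a
 * received_uuid U, the stream references U, so a receiver that only knows
 * that subvolume from a previous receive (i.e. by U, not by this filesystem's
 * local uuid) can still resolve the clone source.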
4872 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
4873 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4874 clone_root->root->root_item.received_uuid);
4876 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
4877 clone_root->root->root_item.uuid);
4878 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
4879 le64_to_cpu(clone_root->root->root_item.ctransid));
4880 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
4881 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
4882 clone_root->offset);
4884 ret = send_cmd(sctx);
4893 * Send an update extent command to user space.
4895 static int send_update_extent(struct send_ctx *sctx,
4896 u64 offset, u32 len)
4901 p = fs_path_alloc();
4905 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
4909 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4913 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4914 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4915 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);
4917 ret = send_cmd(sctx);
4925 static int send_hole(struct send_ctx *sctx, u64 end)
4927 struct fs_path *p = NULL;
4928 u64 offset = sctx->cur_inode_last_extent;
4932 p = fs_path_alloc();
4935 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
4937 goto tlv_put_failure;
4938 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
4939 while (offset < end) {
4940 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);
4942 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
4945 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
4946 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
4947 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
4948 ret = send_cmd(sctx);
4958 static int send_extent_data(struct send_ctx *sctx,
4964 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
4965 return send_update_extent(sctx, offset, len);
4967 while (sent < len) {
4968 u64 size = len - sent;
4971 if (size > BTRFS_SEND_READ_SIZE)
4972 size = BTRFS_SEND_READ_SIZE;
4973 ret = send_write(sctx, offset + sent, size);
4983 static int clone_range(struct send_ctx *sctx,
4984 struct clone_root *clone_root,
4985 const u64 disk_byte,
4990 struct btrfs_path *path;
4991 struct btrfs_key key;
4995 * Prevent cloning from a zero offset with a length matching the sector
4996 * size because in some scenarios this will make the receiver fail.
4998 * For example, if in the source filesystem the extent at offset 0
4999 * has a length of sectorsize and it was written using direct IO, then
5000 * it can never be an inline extent (even if compression is enabled).
5001 * Then this extent can be cloned in the original filesystem to a non
5002 * zero file offset, but it may not be possible to clone in the
5003 * destination filesystem because it can be inlined due to compression
5004 * on the destination filesystem (as the receiver's write operations are
5005 * always done using buffered IO). The same happens when the original
5006 * filesystem does not have compression enabled but the destination
5009 if (clone_root->offset == 0 &&
5010 len == sctx->send_root->fs_info->sectorsize)
5011 return send_extent_data(sctx, offset, len);
5013 path = alloc_path_for_send();
5018 * We can't send a clone operation for the entire range if we find
5019 * extent items in the respective range in the source file that
5020 * refer to different extents or if we find holes.
5021 * So check for that and do a mix of clone and regular write/copy
5022 * operations if needed.
5026 * mkfs.btrfs -f /dev/sda
5027 * mount /dev/sda /mnt
5028 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
5029 * cp --reflink=always /mnt/foo /mnt/bar
5030 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
5031 * btrfs subvolume snapshot -r /mnt /mnt/snap
5033 * If, when we send the snapshot, we are processing file bar (which
5034 * has a higher inode number than foo) we blindly send a clone operation
5035 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
5036 * a file bar that matches the content of file foo - iow, doesn't match
5037 * the content from bar in the original filesystem.
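 *
 * In that example (a hedged sketch of the resulting stream), while processing
 * bar we would emit roughly:
 *
 *   clone bar [0, 50K) from foo [0, 50K)   (extent still shared with foo)
 *   write bar [50K, 100K)                  (foo was overwritten there)
 *
 * instead of one blind 100K clone from foo.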
5039 key.objectid = clone_root->ino;
5040 key.type = BTRFS_EXTENT_DATA_KEY;
5041 key.offset = clone_root->offset;
5042 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
5045 if (ret > 0 && path->slots[0] > 0) {
5046 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
5047 if (key.objectid == clone_root->ino &&
5048 key.type == BTRFS_EXTENT_DATA_KEY)
5053 struct extent_buffer *leaf = path->nodes[0];
5054 int slot = path->slots[0];
5055 struct btrfs_file_extent_item *ei;
5060 if (slot >= btrfs_header_nritems(leaf)) {
5061 ret = btrfs_next_leaf(clone_root->root, path);
5069 btrfs_item_key_to_cpu(leaf, &key, slot);
5072 * We might have an implicit trailing hole (NO_HOLES feature
5073 * enabled). We deal with it after leaving this loop.
5075 if (key.objectid != clone_root->ino ||
5076 key.type != BTRFS_EXTENT_DATA_KEY)
5079 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5080 type = btrfs_file_extent_type(leaf, ei);
5081 if (type == BTRFS_FILE_EXTENT_INLINE) {
5082 ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
5083 ext_len = PAGE_ALIGN(ext_len);
5085 ext_len = btrfs_file_extent_num_bytes(leaf, ei);
5088 if (key.offset + ext_len <= clone_root->offset)
5091 if (key.offset > clone_root->offset) {
5092 /* Implicit hole, NO_HOLES feature enabled. */
5093 u64 hole_len = key.offset - clone_root->offset;
5097 ret = send_extent_data(sctx, offset, hole_len);
5105 clone_root->offset += hole_len;
5106 data_offset += hole_len;
5109 if (key.offset >= clone_root->offset + len)
5112 clone_len = min_t(u64, ext_len, len);
5114 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
5115 btrfs_file_extent_offset(leaf, ei) == data_offset)
5116 ret = send_clone(sctx, offset, clone_len, clone_root);
5118 ret = send_extent_data(sctx, offset, clone_len);
5126 offset += clone_len;
5127 clone_root->offset += clone_len;
5128 data_offset += clone_len;
5134 ret = send_extent_data(sctx, offset, len);
5138 btrfs_free_path(path);
5142 static int send_write_or_clone(struct send_ctx *sctx,
5143 struct btrfs_path *path,
5144 struct btrfs_key *key,
5145 struct clone_root *clone_root)
5148 struct btrfs_file_extent_item *ei;
5149 u64 offset = key->offset;
5152 u64 bs = sctx->send_root->fs_info->sb->s_blocksize;
5154 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5155 struct btrfs_file_extent_item);
5156 type = btrfs_file_extent_type(path->nodes[0], ei);
5157 if (type == BTRFS_FILE_EXTENT_INLINE) {
5158 len = btrfs_file_extent_inline_len(path->nodes[0],
5159 path->slots[0], ei);
5161 * it is possible the inline item won't cover the whole page,
5162 * but there may be items after this page. Make
5163 * sure to send the whole thing
5165 len = PAGE_ALIGN(len);
5167 len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
5170 if (offset + len > sctx->cur_inode_size)
5171 len = sctx->cur_inode_size - offset;
5177 if (clone_root && IS_ALIGNED(offset + len, bs)) {
5181 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
5182 data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
5183 ret = clone_range(sctx, clone_root, disk_byte, data_offset,
5186 ret = send_extent_data(sctx, offset, len);
5192 static int is_extent_unchanged(struct send_ctx *sctx,
5193 struct btrfs_path *left_path,
5194 struct btrfs_key *ekey)
5197 struct btrfs_key key;
5198 struct btrfs_path *path = NULL;
5199 struct extent_buffer *eb;
5201 struct btrfs_key found_key;
5202 struct btrfs_file_extent_item *ei;
5207 u64 left_offset_fixed;
5215 path = alloc_path_for_send();
5219 eb = left_path->nodes[0];
5220 slot = left_path->slots[0];
5221 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5222 left_type = btrfs_file_extent_type(eb, ei);
5224 if (left_type != BTRFS_FILE_EXTENT_REG) {
5228 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5229 left_len = btrfs_file_extent_num_bytes(eb, ei);
5230 left_offset = btrfs_file_extent_offset(eb, ei);
5231 left_gen = btrfs_file_extent_generation(eb, ei);
5234 * Following comments will refer to these graphics. L is the left
5235 * extent which we are checking at the moment. 1-8 are the right
5236 * extents that we iterate.
5237 *
5238 *       |-----L-----|
5239 * |-1-|-2a-|-3-|-4-|-5-|-6-|
5240 *
5241 *       |-----L-----|
5242 * |--1--|-2b-|...(same as above)
5243 *
5244 * Alternative situation. Happens on files where extents got split.
5245 *       |-----L-----|
5246 * |-----------7-----------|-6-|
5247 *
5248 * Alternative situation. Happens on files which got larger.
5249 *       |-----L-----|
5250 * |----------8----------|
5251 * Nothing follows after 8.
5254 key.objectid = ekey->objectid;
5255 key.type = BTRFS_EXTENT_DATA_KEY;
5256 key.offset = ekey->offset;
5257 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
5266 * Handle special case where the right side has no extents at all.
5268 eb = path->nodes[0];
5269 slot = path->slots[0];
5270 btrfs_item_key_to_cpu(eb, &found_key, slot);
5271 if (found_key.objectid != key.objectid ||
5272 found_key.type != key.type) {
5273 /* If we're a hole then just pretend nothing changed */
5274 ret = (left_disknr) ? 0 : 1;
5279 * We're now on 2a, 2b or 7.
5282 while (key.offset < ekey->offset + left_len) {
5283 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
5284 right_type = btrfs_file_extent_type(eb, ei);
5285 if (right_type != BTRFS_FILE_EXTENT_REG &&
5286 right_type != BTRFS_FILE_EXTENT_INLINE) {
5291 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5292 right_len = btrfs_file_extent_inline_len(eb, slot, ei);
5293 right_len = PAGE_ALIGN(right_len);
5295 right_len = btrfs_file_extent_num_bytes(eb, ei);
5299 * Are we at extent 8? If yes, we know the extent is changed.
5300 * This may only happen on the first iteration.
5302 if (found_key.offset + right_len <= ekey->offset) {
5303 /* If we're a hole just pretend nothing changed */
5304 ret = (left_disknr) ? 0 : 1;
5309 * We just wanted to see whether, when we have an inline extent, what
5310 * follows it is a regular extent (we wanted to check the above
5311 * condition for inline extents too). This should normally not
5312 * happen but it's possible for example when we have an inline
5313 * compressed extent representing data with a size matching
5314 * the page size (currently the same as sector size).
5316 if (right_type == BTRFS_FILE_EXTENT_INLINE) {
5321 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
5322 right_offset = btrfs_file_extent_offset(eb, ei);
5323 right_gen = btrfs_file_extent_generation(eb, ei);
5325 left_offset_fixed = left_offset;
5326 if (key.offset < ekey->offset) {
5327 /* Fix the right offset for 2a and 7. */
5328 right_offset += ekey->offset - key.offset;
5330 /* Fix the left offset for all behind 2a and 2b */
5331 left_offset_fixed += key.offset - ekey->offset;
5335 * Check if we have the same extent.
5337 if (left_disknr != right_disknr ||
5338 left_offset_fixed != right_offset ||
5339 left_gen != right_gen) {
5345 * Go to the next extent.
5347 ret = btrfs_next_item(sctx->parent_root, path);
5351 eb = path->nodes[0];
5352 slot = path->slots[0];
5353 btrfs_item_key_to_cpu(eb, &found_key, slot);
5355 if (ret || found_key.objectid != key.objectid ||
5356 found_key.type != key.type) {
5357 key.offset += right_len;
5360 if (found_key.offset != key.offset + right_len) {
5368 * We're now behind the left extent (treat as unchanged) or at the end
5369 * of the right side (treat as changed).
5371 if (key.offset >= ekey->offset + left_len)
5378 btrfs_free_path(path);
5382 static int get_last_extent(struct send_ctx *sctx, u64 offset)
5384 struct btrfs_path *path;
5385 struct btrfs_root *root = sctx->send_root;
5386 struct btrfs_file_extent_item *fi;
5387 struct btrfs_key key;
5392 path = alloc_path_for_send();
5396 sctx->cur_inode_last_extent = 0;
5398 key.objectid = sctx->cur_ino;
5399 key.type = BTRFS_EXTENT_DATA_KEY;
5400 key.offset = offset;
5401 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
5405 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
5406 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
5409 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5410 struct btrfs_file_extent_item);
5411 type = btrfs_file_extent_type(path->nodes[0], fi);
5412 if (type == BTRFS_FILE_EXTENT_INLINE) {
5413 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
5414 path->slots[0], fi);
5415 extent_end = ALIGN(key.offset + size,
5416 sctx->send_root->fs_info->sectorsize);
5418 extent_end = key.offset +
5419 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5421 sctx->cur_inode_last_extent = extent_end;
5423 btrfs_free_path(path);
5427 static int range_is_hole_in_parent(struct send_ctx *sctx,
5431 struct btrfs_path *path;
5432 struct btrfs_key key;
5433 struct btrfs_root *root = sctx->parent_root;
5434 u64 search_start = start;
5437 path = alloc_path_for_send();
5441 key.objectid = sctx->cur_ino;
5442 key.type = BTRFS_EXTENT_DATA_KEY;
5443 key.offset = search_start;
5444 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5447 if (ret > 0 && path->slots[0] > 0)
5450 while (search_start < end) {
5451 struct extent_buffer *leaf = path->nodes[0];
5452 int slot = path->slots[0];
5453 struct btrfs_file_extent_item *fi;
5456 if (slot >= btrfs_header_nritems(leaf)) {
5457 ret = btrfs_next_leaf(root, path);
5465 btrfs_item_key_to_cpu(leaf, &key, slot);
5466 if (key.objectid < sctx->cur_ino ||
5467 key.type < BTRFS_EXTENT_DATA_KEY)
5469 if (key.objectid > sctx->cur_ino ||
5470 key.type > BTRFS_EXTENT_DATA_KEY ||
5474 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
5475 if (btrfs_file_extent_type(leaf, fi) ==
5476 BTRFS_FILE_EXTENT_INLINE) {
5477 u64 size = btrfs_file_extent_inline_len(leaf, slot, fi);
5479 extent_end = ALIGN(key.offset + size,
5480 root->fs_info->sectorsize);
5482 extent_end = key.offset +
5483 btrfs_file_extent_num_bytes(leaf, fi);
5485 if (extent_end <= start)
5487 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) {
5488 search_start = extent_end;
5498 btrfs_free_path(path);
5502 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
5503 struct btrfs_key *key)
5505 struct btrfs_file_extent_item *fi;
5510 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
5513 if (sctx->cur_inode_last_extent == (u64)-1) {
5514 ret = get_last_extent(sctx, key->offset - 1);
5519 fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
5520 struct btrfs_file_extent_item);
5521 type = btrfs_file_extent_type(path->nodes[0], fi);
5522 if (type == BTRFS_FILE_EXTENT_INLINE) {
5523 u64 size = btrfs_file_extent_inline_len(path->nodes[0],
5524 path->slots[0], fi);
5525 extent_end = ALIGN(key->offset + size,
5526 sctx->send_root->fs_info->sectorsize);
5528 extent_end = key->offset +
5529 btrfs_file_extent_num_bytes(path->nodes[0], fi);
5532 if (path->slots[0] == 0 &&
5533 sctx->cur_inode_last_extent < key->offset) {
5535 * We might have skipped entire leafs that contained only
5536 * file extent items for our current inode. These leafs have
5537 * a generation number smaller (older) than the one in the
5538 * current leaf and the leaf our last extent came from, and
5539 * are located between these 2 leafs.
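 *
 * For example (hypothetical): if a whole leaf full of unchanged extent items
 * for this inode sat between the leaf of our last recorded extent and the
 * current leaf, compare_tree skipped it, so the apparent gap in key offsets
 * here is not a hole; re-reading the last extent avoids emitting bogus zero
 * writes for it.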
5541 ret = get_last_extent(sctx, key->offset - 1);
5546 if (sctx->cur_inode_last_extent < key->offset) {
5547 ret = range_is_hole_in_parent(sctx,
5548 sctx->cur_inode_last_extent,
5553 ret = send_hole(sctx, key->offset);
5557 sctx->cur_inode_last_extent = extent_end;
5561 static int process_extent(struct send_ctx *sctx,
5562 struct btrfs_path *path,
5563 struct btrfs_key *key)
5565 struct clone_root *found_clone = NULL;
5568 if (S_ISLNK(sctx->cur_inode_mode))
5571 if (sctx->parent_root && !sctx->cur_inode_new) {
5572 ret = is_extent_unchanged(sctx, path, key);
5580 struct btrfs_file_extent_item *ei;
5583 ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
5584 struct btrfs_file_extent_item);
5585 type = btrfs_file_extent_type(path->nodes[0], ei);
5586 if (type == BTRFS_FILE_EXTENT_PREALLOC ||
5587 type == BTRFS_FILE_EXTENT_REG) {
5589 * The send spec does not have a prealloc command yet,
5590 * so just leave a hole for prealloc'ed extents until
5591 * we have enough commands queued up to justify rev'ing the send spec.
5594 if (type == BTRFS_FILE_EXTENT_PREALLOC) {
5599 /* Have a hole, just skip it. */
5600 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
5607 ret = find_extent_clone(sctx, path, key->objectid, key->offset,
5608 sctx->cur_inode_size, &found_clone);
5609 if (ret != -ENOENT && ret < 0)
5612 ret = send_write_or_clone(sctx, path, key, found_clone);
5616 ret = maybe_send_hole(sctx, path, key);
5621 static int process_all_extents(struct send_ctx *sctx)
5624 struct btrfs_root *root;
5625 struct btrfs_path *path;
5626 struct btrfs_key key;
5627 struct btrfs_key found_key;
5628 struct extent_buffer *eb;
5631 root = sctx->send_root;
5632 path = alloc_path_for_send();
5636 key.objectid = sctx->cmp_key->objectid;
5637 key.type = BTRFS_EXTENT_DATA_KEY;
5639 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5644 eb = path->nodes[0];
5645 slot = path->slots[0];
5647 if (slot >= btrfs_header_nritems(eb)) {
5648 ret = btrfs_next_leaf(root, path);
5651 } else if (ret > 0) {
5658 btrfs_item_key_to_cpu(eb, &found_key, slot);
5660 if (found_key.objectid != key.objectid ||
5661 found_key.type != key.type) {
5666 ret = process_extent(sctx, path, &found_key);
5674 btrfs_free_path(path);
5678 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
5680 int *refs_processed)
5684 if (sctx->cur_ino == 0)
5686 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
5687 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
5689 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
5692 ret = process_recorded_refs(sctx, pending_move);
5696 *refs_processed = 1;
5701 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
5712 int pending_move = 0;
5713 int refs_processed = 0;
5715 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
5721 * We have processed the refs and thus need to advance send_progress.
5722 * Now, calls to get_cur_xxx will take the updated refs of the current
5723 * inode into account.
5725 * On the other hand, if our current inode is a directory and couldn't
5726 * be moved/renamed because its parent was renamed/moved too and it has
5727 * a higher inode number, we can only move/rename our current inode
5728 * after we moved/renamed its parent. Therefore in this case operate on
5729 * the old path (pre move/rename) of our current inode, and the
5730 * move/rename will be performed later.
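 *
 * A hedged example: if our directory (cur_ino) has to move under a directory
 * with a higher inode number that itself still waits for its own rename,
 * pending_move stays set, send_progress is not advanced, and path lookups for
 * cur_ino keep resolving to its old (parent snapshot) location until
 * apply_children_dir_moves()/apply_dir_move() later perform the rename.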
5732 if (refs_processed && !pending_move)
5733 sctx->send_progress = sctx->cur_ino + 1;
5735 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
5737 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
5740 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
5741 &left_mode, &left_uid, &left_gid, NULL);
5745 if (!sctx->parent_root || sctx->cur_inode_new) {
5747 if (!S_ISLNK(sctx->cur_inode_mode))
5750 ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
5751 NULL, NULL, &right_mode, &right_uid,
5756 if (left_uid != right_uid || left_gid != right_gid)
5758 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
5762 if (S_ISREG(sctx->cur_inode_mode)) {
5763 if (need_send_hole(sctx)) {
5764 if (sctx->cur_inode_last_extent == (u64)-1 ||
5765 sctx->cur_inode_last_extent <
5766 sctx->cur_inode_size) {
5767 ret = get_last_extent(sctx, (u64)-1);
5771 if (sctx->cur_inode_last_extent <
5772 sctx->cur_inode_size) {
5773 ret = send_hole(sctx, sctx->cur_inode_size);
5778 ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5779 sctx->cur_inode_size);
5785 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5786 left_uid, left_gid);
5791 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
5798 * If other directory inodes depended on our current directory
5799 * inode's move/rename, now do their move/rename operations.
5801 if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
5802 ret = apply_children_dir_moves(sctx);
5806 * Need to send that every time, no matter if it actually
5807 * changed between the two trees as we have done changes to
5808 * the inode before. If our inode is a directory and it's
5809 * waiting to be moved/renamed, we will send its utimes when
5810 * it's moved/renamed, therefore we don't need to do it here.
5812 sctx->send_progress = sctx->cur_ino + 1;
5813 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
5822 static int changed_inode(struct send_ctx *sctx,
5823 enum btrfs_compare_tree_result result)
5826 struct btrfs_key *key = sctx->cmp_key;
5827 struct btrfs_inode_item *left_ii = NULL;
5828 struct btrfs_inode_item *right_ii = NULL;
5832 sctx->cur_ino = key->objectid;
5833 sctx->cur_inode_new_gen = 0;
5834 sctx->cur_inode_last_extent = (u64)-1;
5837 * Set send_progress to current inode. This will tell all get_cur_xxx
5838 * functions that the current inode's refs are not updated yet. Later,
5839 * when process_recorded_refs is finished, it is set to cur_ino + 1.
5841 sctx->send_progress = sctx->cur_ino;
5843 if (result == BTRFS_COMPARE_TREE_NEW ||
5844 result == BTRFS_COMPARE_TREE_CHANGED) {
5845 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
5846 sctx->left_path->slots[0],
5847 struct btrfs_inode_item);
5848 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
5851 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5852 sctx->right_path->slots[0],
5853 struct btrfs_inode_item);
5854 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5857 if (result == BTRFS_COMPARE_TREE_CHANGED) {
5858 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
5859 sctx->right_path->slots[0],
5860 struct btrfs_inode_item);
5862 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
5866 * The cur_ino = root dir case is special here. We can't treat
5867 * the inode as deleted+reused because it would generate a
5868 * stream that tries to delete/mkdir the root dir.
5870 if (left_gen != right_gen &&
5871 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5872 sctx->cur_inode_new_gen = 1;
5875 if (result == BTRFS_COMPARE_TREE_NEW) {
5876 sctx->cur_inode_gen = left_gen;
5877 sctx->cur_inode_new = 1;
5878 sctx->cur_inode_deleted = 0;
5879 sctx->cur_inode_size = btrfs_inode_size(
5880 sctx->left_path->nodes[0], left_ii);
5881 sctx->cur_inode_mode = btrfs_inode_mode(
5882 sctx->left_path->nodes[0], left_ii);
5883 sctx->cur_inode_rdev = btrfs_inode_rdev(
5884 sctx->left_path->nodes[0], left_ii);
5885 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
5886 ret = send_create_inode_if_needed(sctx);
5887 } else if (result == BTRFS_COMPARE_TREE_DELETED) {
5888 sctx->cur_inode_gen = right_gen;
5889 sctx->cur_inode_new = 0;
5890 sctx->cur_inode_deleted = 1;
5891 sctx->cur_inode_size = btrfs_inode_size(
5892 sctx->right_path->nodes[0], right_ii);
5893 sctx->cur_inode_mode = btrfs_inode_mode(
5894 sctx->right_path->nodes[0], right_ii);
5895 } else if (result == BTRFS_COMPARE_TREE_CHANGED) {
5897 * We need to do some special handling in case the inode was
5898 * reported as changed with a changed generation number. This
5899 * means that the original inode was deleted and a new inode
5900 * reused the same inum. So we have to treat the old inode as
5901 * deleted and the new one as new.
5903 if (sctx->cur_inode_new_gen) {
5905 * First, process the inode as if it was deleted.
5907 sctx->cur_inode_gen = right_gen;
5908 sctx->cur_inode_new = 0;
5909 sctx->cur_inode_deleted = 1;
5910 sctx->cur_inode_size = btrfs_inode_size(
5911 sctx->right_path->nodes[0], right_ii);
5912 sctx->cur_inode_mode = btrfs_inode_mode(
5913 sctx->right_path->nodes[0], right_ii);
5914 ret = process_all_refs(sctx,
5915 BTRFS_COMPARE_TREE_DELETED);
5920 * Now process the inode as if it was new.
5922 sctx->cur_inode_gen = left_gen;
5923 sctx->cur_inode_new = 1;
5924 sctx->cur_inode_deleted = 0;
5925 sctx->cur_inode_size = btrfs_inode_size(
5926 sctx->left_path->nodes[0], left_ii);
5927 sctx->cur_inode_mode = btrfs_inode_mode(
5928 sctx->left_path->nodes[0], left_ii);
5929 sctx->cur_inode_rdev = btrfs_inode_rdev(
5930 sctx->left_path->nodes[0], left_ii);
5931 ret = send_create_inode_if_needed(sctx);
5935 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
5939 * Advance send_progress now as we did not get into
5940 * process_recorded_refs_if_needed in the new_gen case.
5942 sctx->send_progress = sctx->cur_ino + 1;
5945 * Now process all extents and xattrs of the inode as if
5946 * they were all new.
5948 ret = process_all_extents(sctx);
5951 ret = process_all_new_xattrs(sctx);
5955 sctx->cur_inode_gen = left_gen;
5956 sctx->cur_inode_new = 0;
5957 sctx->cur_inode_new_gen = 0;
5958 sctx->cur_inode_deleted = 0;
5959 sctx->cur_inode_size = btrfs_inode_size(
5960 sctx->left_path->nodes[0], left_ii);
5961 sctx->cur_inode_mode = btrfs_inode_mode(
5962 sctx->left_path->nodes[0], left_ii);
5971 * We have to process new refs before deleted refs, but compare_trees gives us
5972 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
5973 * first and later process them in process_recorded_refs.
5974 * For the cur_inode_new_gen case, we skip recording completely because
5975 * changed_inode already initiated processing of refs. The reason for this is
5976 * that in this case, compare_tree actually compares the refs of 2 different
5977 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
5978 * refs of the right tree as deleted and all refs of the left tree as new.
5980 static int changed_ref(struct send_ctx *sctx,
5981 enum btrfs_compare_tree_result result)
5985 if (sctx->cur_ino != sctx->cmp_key->objectid) {
5986 inconsistent_snapshot_error(sctx, result, "reference");
5990 if (!sctx->cur_inode_new_gen &&
5991 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
5992 if (result == BTRFS_COMPARE_TREE_NEW)
5993 ret = record_new_ref(sctx);
5994 else if (result == BTRFS_COMPARE_TREE_DELETED)
5995 ret = record_deleted_ref(sctx);
5996 else if (result == BTRFS_COMPARE_TREE_CHANGED)
5997 ret = record_changed_ref(sctx);
6004 * Process new/deleted/changed xattrs. We skip processing in the
6005 * cur_inode_new_gen case because changed_inode already initiated processing
6006 * of xattrs. The reason is the same as in changed_ref.
6008 static int changed_xattr(struct send_ctx *sctx,
6009 enum btrfs_compare_tree_result result)
6013 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6014 inconsistent_snapshot_error(sctx, result, "xattr");
6018 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6019 if (result == BTRFS_COMPARE_TREE_NEW)
6020 ret = process_new_xattr(sctx);
6021 else if (result == BTRFS_COMPARE_TREE_DELETED)
6022 ret = process_deleted_xattr(sctx);
6023 else if (result == BTRFS_COMPARE_TREE_CHANGED)
6024 ret = process_changed_xattr(sctx);
6031 * Process new/deleted/changed extents. We skip processing in the
6032 * cur_inode_new_gen case because changed_inode already initiated processing
6033 * of extents. The reason is the same as in changed_ref.
6035 static int changed_extent(struct send_ctx *sctx,
6036 enum btrfs_compare_tree_result result)
6040 if (sctx->cur_ino != sctx->cmp_key->objectid) {
6042 if (result == BTRFS_COMPARE_TREE_CHANGED) {
6043 struct extent_buffer *leaf_l;
6044 struct extent_buffer *leaf_r;
6045 struct btrfs_file_extent_item *ei_l;
6046 struct btrfs_file_extent_item *ei_r;
6048 leaf_l = sctx->left_path->nodes[0];
6049 leaf_r = sctx->right_path->nodes[0];
6050 ei_l = btrfs_item_ptr(leaf_l,
6051 sctx->left_path->slots[0],
6052 struct btrfs_file_extent_item);
6053 ei_r = btrfs_item_ptr(leaf_r,
6054 sctx->right_path->slots[0],
6055 struct btrfs_file_extent_item);
6058 * We may have found an extent item that has changed
6059 * only its disk_bytenr field and the corresponding
6060 * inode item was not updated. This case happens due to
6061 * very specific timings during relocation when a leaf
6062 * that contains file extent items is COWed while
6063 * relocation is ongoing and it's in the stage where it
6064 * updates data pointers. So when this happens we can
6065 * safely ignore it since we know it's the same extent,
6066 * but just at different logical and physical locations
6067 * (when an extent is fully replaced with a new one, we
6068 * know the generation number must have changed too,
6069 * since snapshot creation implies committing the current
6070 * transaction, and the inode item must have been updated
6072 * This replacement of the disk_bytenr happens at
6073 * relocation.c:replace_file_extents() through
6074 * relocation.c:btrfs_reloc_cow_block().
6076 if (btrfs_file_extent_generation(leaf_l, ei_l) ==
6077 btrfs_file_extent_generation(leaf_r, ei_r) &&
6078 btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
6079 btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
6080 btrfs_file_extent_compression(leaf_l, ei_l) ==
6081 btrfs_file_extent_compression(leaf_r, ei_r) &&
6082 btrfs_file_extent_encryption(leaf_l, ei_l) ==
6083 btrfs_file_extent_encryption(leaf_r, ei_r) &&
6084 btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
6085 btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
6086 btrfs_file_extent_type(leaf_l, ei_l) ==
6087 btrfs_file_extent_type(leaf_r, ei_r) &&
6088 btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
6089 btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
6090 btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
6091 btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
6092 btrfs_file_extent_offset(leaf_l, ei_l) ==
6093 btrfs_file_extent_offset(leaf_r, ei_r) &&
6094 btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
6095 btrfs_file_extent_num_bytes(leaf_r, ei_r))
6099 inconsistent_snapshot_error(sctx, result, "extent");
6103 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
6104 if (result != BTRFS_COMPARE_TREE_DELETED)
6105 ret = process_extent(sctx, sctx->left_path,
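/*
 * Return 1 if the generation of directory inode 'dir' differs between the
 * send root and the parent root (i.e. the directory was deleted and
 * recreated), 0 if it is the same.
 */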
6112 static int dir_changed(struct send_ctx *sctx, u64 dir)
6114 u64 orig_gen, new_gen;
6117 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
6122 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
6127 return (orig_gen != new_gen) ? 1 : 0;
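/*
 * For a ref item that the tree comparison reported as unchanged, check
 * whether any parent directory it points to changed generation between the
 * two roots. An INODE_REF names a single parent in the key offset, an
 * INODE_EXTREF item may carry several parents in its payload.
 */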
6130 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
6131 struct btrfs_key *key)
6133 struct btrfs_inode_extref *extref;
6134 struct extent_buffer *leaf;
6135 u64 dirid = 0, last_dirid = 0;
6142 /* Easy case, just check this one dirid */
6143 if (key->type == BTRFS_INODE_REF_KEY) {
6144 dirid = key->offset;
6146 ret = dir_changed(sctx, dirid);
6150 leaf = path->nodes[0];
6151 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
6152 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
6153 while (cur_offset < item_size) {
6154 extref = (struct btrfs_inode_extref *)(ptr +
6156 dirid = btrfs_inode_extref_parent(leaf, extref);
6157 ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
6158 cur_offset += ref_name_len + sizeof(*extref);
6159 if (dirid == last_dirid)
6161 ret = dir_changed(sctx, dirid);
6171 * Updates compare-related fields in sctx and simply forwards to the actual
6172 * changed_xxx functions.
6174 static int changed_cb(struct btrfs_path *left_path,
6175 struct btrfs_path *right_path,
6176 struct btrfs_key *key,
6177 enum btrfs_compare_tree_result result,
6181 struct send_ctx *sctx = ctx;
6183 if (result == BTRFS_COMPARE_TREE_SAME) {
6184 if (key->type == BTRFS_INODE_REF_KEY ||
6185 key->type == BTRFS_INODE_EXTREF_KEY) {
6186 ret = compare_refs(sctx, left_path, key);
6191 } else if (key->type == BTRFS_EXTENT_DATA_KEY) {
6192 return maybe_send_hole(sctx, left_path, key);
6196 result = BTRFS_COMPARE_TREE_CHANGED;
6200 sctx->left_path = left_path;
6201 sctx->right_path = right_path;
6202 sctx->cmp_key = key;
6204 ret = finish_inode_if_needed(sctx, 0);
6208 /* Ignore non-FS objects */
6209 if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
6210 key->objectid == BTRFS_FREE_SPACE_OBJECTID)
6213 if (key->type == BTRFS_INODE_ITEM_KEY)
6214 ret = changed_inode(sctx, result);
6215 else if (key->type == BTRFS_INODE_REF_KEY ||
6216 key->type == BTRFS_INODE_EXTREF_KEY)
6217 ret = changed_ref(sctx, result);
6218 else if (key->type == BTRFS_XATTR_ITEM_KEY)
6219 ret = changed_xattr(sctx, result);
6220 else if (key->type == BTRFS_EXTENT_DATA_KEY)
6221 ret = changed_extent(sctx, result);
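/*
 * Full send: walk every item of the send root, starting at the first inode
 * item, and feed each one to changed_cb() with result BTRFS_COMPARE_TREE_NEW.
 */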
6227 static int full_send_tree(struct send_ctx *sctx)
6230 struct btrfs_root *send_root = sctx->send_root;
6231 struct btrfs_key key;
6232 struct btrfs_key found_key;
6233 struct btrfs_path *path;
6234 struct extent_buffer *eb;
6237 path = alloc_path_for_send();
6241 key.objectid = BTRFS_FIRST_FREE_OBJECTID;
6242 key.type = BTRFS_INODE_ITEM_KEY;
6245 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
6252 eb = path->nodes[0];
6253 slot = path->slots[0];
6254 btrfs_item_key_to_cpu(eb, &found_key, slot);
6256 ret = changed_cb(path, NULL, &found_key,
6257 BTRFS_COMPARE_TREE_NEW, sctx);
6261 key.objectid = found_key.objectid;
6262 key.type = found_key.type;
6263 key.offset = found_key.offset + 1;
6265 ret = btrfs_next_item(send_root, path);
6275 ret = finish_inode_if_needed(sctx, 1);
6278 btrfs_free_path(path);
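/*
 * Emit the stream header (unless BTRFS_SEND_FLAG_OMIT_STREAM_HEADER is set)
 * and the subvolume begin command, then either diff send_root against
 * parent_root (incremental send) or send the full tree.
 */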
6282 static int send_subvol(struct send_ctx *sctx)
6286 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
6287 ret = send_header(sctx);
6292 ret = send_subvol_begin(sctx);
6296 if (sctx->parent_root) {
6297 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
6301 ret = finish_inode_if_needed(sctx, 1);
6305 ret = full_send_tree(sctx);
6311 free_recorded_refs(sctx);
6316 * If orphan cleanup removed any orphans from a root, it means the tree
6317 * was modified and therefore the commit root is not the same as the current
6318 * root anymore. This is a problem, because send uses the commit root and
6319 * therefore can see inode items that don't exist in the current root anymore,
6320 * and for example make calls to btrfs_iget, which will do tree lookups based
6321 * on the current root and not on the commit root. Those lookups will fail,
6322 * returning a -ESTALE error, and making send fail with that error. So make
6323 * sure a send does not see any orphans we have just removed, and that it will
6324 * see the same inodes regardless of whether a transaction commit happened
6325 * before it started (meaning that the commit root will be the same as the
6326 * current root) or not.
6328 static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
6331 struct btrfs_trans_handle *trans = NULL;
6334 if (sctx->parent_root &&
6335 sctx->parent_root->node != sctx->parent_root->commit_root)
6338 for (i = 0; i < sctx->clone_roots_cnt; i++)
6339 if (sctx->clone_roots[i].root->node !=
6340 sctx->clone_roots[i].root->commit_root)
6344 return btrfs_end_transaction(trans);
6349 /* Use any root, all fs roots will get their commit roots updated. */
6351 trans = btrfs_join_transaction(sctx->send_root);
6353 return PTR_ERR(trans);
6357 return btrfs_commit_transaction(trans);
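/*
 * Drop one count from the root's send_in_progress counter, logging an error
 * if it ever goes negative.
 */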
6360 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
6362 spin_lock(&root->root_item_lock);
6363 root->send_in_progress--;
6365 * Not much left to do, we don't know why it's unbalanced and
6366 * can't blindly reset it to 0.
6368 if (root->send_in_progress < 0)
6369 btrfs_err(root->fs_info,
6370 "send_in_progres unbalanced %d root %llu",
6371 root->send_in_progress, root->root_key.objectid);
6372 spin_unlock(&root->root_item_lock);
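/*
 * Entry point of the send ioctl. Validates the arguments, pins the involved
 * roots (send root, optional parent root and clone sources) for the duration
 * of the send, sets up the send context and streams the commands to the file
 * descriptor provided by user space.
 */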
6375 long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
6378 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
6379 struct btrfs_fs_info *fs_info = send_root->fs_info;
6380 struct btrfs_root *clone_root;
6381 struct btrfs_key key;
6382 struct send_ctx *sctx = NULL;
6384 u64 *clone_sources_tmp = NULL;
6385 int clone_sources_to_rollback = 0;
6386 unsigned alloc_size;
6387 int sort_clone_roots = 0;
6390 if (!capable(CAP_SYS_ADMIN))
6394 * The subvolume must remain read-only during send, so protect against
6395 * making it RW. This also protects against deletion.
6397 spin_lock(&send_root->root_item_lock);
6398 send_root->send_in_progress++;
6399 spin_unlock(&send_root->root_item_lock);
6402 * This is done when we look up the root; it should already be complete
6403 * by the time we get here.
6405 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);
6408 * Userspace tools do the checks and warn the user if the subvolume is not read-only.
6411 if (!btrfs_root_readonly(send_root)) {
6417 * Check that we don't overflow at later allocations, we request
6418 * clone_sources_count + 1 items, and compare to unsigned long inside access_ok().
6421 if (arg->clone_sources_count >
6422 ULONG_MAX / sizeof(struct clone_root) - 1) {
6427 if (!access_ok(VERIFY_READ, arg->clone_sources,
6428 sizeof(*arg->clone_sources) *
6429 arg->clone_sources_count)) {
6434 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
6439 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
6445 INIT_LIST_HEAD(&sctx->new_refs);
6446 INIT_LIST_HEAD(&sctx->deleted_refs);
6447 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
6448 INIT_LIST_HEAD(&sctx->name_cache_list);
6450 sctx->flags = arg->flags;
6452 sctx->send_filp = fget(arg->send_fd);
6453 if (!sctx->send_filp) {
6458 sctx->send_root = send_root;
6460 * Unlikely but possible: if the subvolume is marked for deletion but
6461 * removal of its directory entry is slow, a send can still be started.
6463 if (btrfs_root_dead(sctx->send_root)) {
6468 sctx->clone_roots_cnt = arg->clone_sources_count;
6470 sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
6471 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
6472 if (!sctx->send_buf) {
6477 sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
6478 if (!sctx->read_buf) {
6483 sctx->pending_dir_moves = RB_ROOT;
6484 sctx->waiting_dir_moves = RB_ROOT;
6485 sctx->orphan_dirs = RB_ROOT;
6487 alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);
6489 sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
6490 if (!sctx->clone_roots) {
6495 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);
6497 if (arg->clone_sources_count) {
6498 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
6499 if (!clone_sources_tmp) {
6504 ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
6511 for (i = 0; i < arg->clone_sources_count; i++) {
6512 key.objectid = clone_sources_tmp[i];
6513 key.type = BTRFS_ROOT_ITEM_KEY;
6514 key.offset = (u64)-1;
6516 index = srcu_read_lock(&fs_info->subvol_srcu);
6518 clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
6519 if (IS_ERR(clone_root)) {
6520 srcu_read_unlock(&fs_info->subvol_srcu, index);
6521 ret = PTR_ERR(clone_root);
6524 spin_lock(&clone_root->root_item_lock);
6525 if (!btrfs_root_readonly(clone_root) ||
6526 btrfs_root_dead(clone_root)) {
6527 spin_unlock(&clone_root->root_item_lock);
6528 srcu_read_unlock(&fs_info->subvol_srcu, index);
6532 clone_root->send_in_progress++;
6533 spin_unlock(&clone_root->root_item_lock);
6534 srcu_read_unlock(&fs_info->subvol_srcu, index);
6536 sctx->clone_roots[i].root = clone_root;
6537 clone_sources_to_rollback = i + 1;
6539 kvfree(clone_sources_tmp);
6540 clone_sources_tmp = NULL;
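/*
 * For an incremental send, look up the parent snapshot's root; it must be
 * read-only and not scheduled for deletion, and it is accounted as having a
 * send in progress.
 */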
6543 if (arg->parent_root) {
6544 key.objectid = arg->parent_root;
6545 key.type = BTRFS_ROOT_ITEM_KEY;
6546 key.offset = (u64)-1;
6548 index = srcu_read_lock(&fs_info->subvol_srcu);
6550 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
6551 if (IS_ERR(sctx->parent_root)) {
6552 srcu_read_unlock(&fs_info->subvol_srcu, index);
6553 ret = PTR_ERR(sctx->parent_root);
6557 spin_lock(&sctx->parent_root->root_item_lock);
6558 sctx->parent_root->send_in_progress++;
6559 if (!btrfs_root_readonly(sctx->parent_root) ||
6560 btrfs_root_dead(sctx->parent_root)) {
6561 spin_unlock(&sctx->parent_root->root_item_lock);
6562 srcu_read_unlock(&fs_info->subvol_srcu, index);
6566 spin_unlock(&sctx->parent_root->root_item_lock);
6568 srcu_read_unlock(&fs_info->subvol_srcu, index);
6572 * Clones from send_root are allowed, but only if the clone source
6573 * is behind the current send position. This is checked while searching
6574 * for possible clone sources.
6576 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;
6578 /* We do a bsearch later */
6579 sort(sctx->clone_roots, sctx->clone_roots_cnt,
6580 sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
6582 sort_clone_roots = 1;
6584 ret = ensure_commit_roots_uptodate(sctx);
6588 current->journal_info = BTRFS_SEND_TRANS_STUB;
6589 ret = send_subvol(sctx);
6590 current->journal_info = NULL;
6594 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
6595 ret = begin_cmd(sctx, BTRFS_SEND_C_END);
6598 ret = send_cmd(sctx);
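/*
 * Leftover pending/waiting directory moves and orphan dir infos are only
 * expected after an error; warn if any survive a successful send and free
 * whatever remains in either case.
 */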
6604 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
6605 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
6607 struct pending_dir_move *pm;
6609 n = rb_first(&sctx->pending_dir_moves);
6610 pm = rb_entry(n, struct pending_dir_move, node);
6611 while (!list_empty(&pm->list)) {
6612 struct pending_dir_move *pm2;
6614 pm2 = list_first_entry(&pm->list,
6615 struct pending_dir_move, list);
6616 free_pending_move(sctx, pm2);
6618 free_pending_move(sctx, pm);
6621 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
6622 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
6624 struct waiting_dir_move *dm;
6626 n = rb_first(&sctx->waiting_dir_moves);
6627 dm = rb_entry(n, struct waiting_dir_move, node);
6628 rb_erase(&dm->node, &sctx->waiting_dir_moves);
6632 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
6633 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
6635 struct orphan_dir_info *odi;
6637 n = rb_first(&sctx->orphan_dirs);
6638 odi = rb_entry(n, struct orphan_dir_info, node);
6639 free_orphan_dir_info(sctx, odi);
6642 if (sort_clone_roots) {
6643 for (i = 0; i < sctx->clone_roots_cnt; i++)
6644 btrfs_root_dec_send_in_progress(
6645 sctx->clone_roots[i].root);
6647 for (i = 0; sctx && i < clone_sources_to_rollback; i++)
6648 btrfs_root_dec_send_in_progress(
6649 sctx->clone_roots[i].root);
6651 btrfs_root_dec_send_in_progress(send_root);
6653 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
6654 btrfs_root_dec_send_in_progress(sctx->parent_root);
6656 kvfree(clone_sources_tmp);
6659 if (sctx->send_filp)
6660 fput(sctx->send_filp);
6662 kvfree(sctx->clone_roots);
6663 kvfree(sctx->send_buf);
6664 kvfree(sctx->read_buf);
6666 name_cache_free(sctx);