/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/bsearch.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "btrfs_inode.h"
#include "transaction.h"
static int g_verbose = 0;

#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
	unsigned short buf_len:15;
	unsigned short reversed:1;
	/*
	 * Average path length does not exceed 200 bytes, we'll have
	 * better packing in the slab and higher chance to satisfy
	 * an allocation later during send.
	 */
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
	struct file *send_filp;

	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_last_extent;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;

	struct file_ra_state ra;
	/*
	 * We process inodes in increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */
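	/*
	 * Illustrative sketch (not part of the original comment): for the two
	 * mv operations above the receiver effectively gets
	 *
	 *   rename <d's old path> -> <d's new path>   (higher inode first)
	 *   rename <c's old path> -> <c's new path>   (delayed, replayed once
	 *                                               its new parent exists)
	 *
	 * The move of "c" is queued in pending_dir_moves while "c" is being
	 * processed and is only emitted after "d", its new parent, has been
	 * moved into place.
	 */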
	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;
	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/c/x /a/b/YY
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
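	/*
	 * Illustrative sketch (not part of the original code, names made up):
	 * assuming the parent snapshot has /a/b/c/x and the send snapshot
	 * deleted "c" but moved "x" away, the receiver roughly sees:
	 *
	 *   rename /a/b/c              -> /o<c-ino>-<gen>-0   (orphanize "c")
	 *   ... operations on "x" use /o<c-ino>-<gen>-0/x ...
	 *   rename /o<c-ino>-<gen>-0/x -> <final path of x>
	 *   rmdir  /o<c-ino>-<gen>-0                  (only now "c" is empty)
	 *
	 * orphan_dirs remembers, per deleted directory inode, that the rmdir
	 * must wait for the pending child move above.
	 */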
struct pending_dir_move {
	struct list_head list;
	struct list_head update_refs;

struct waiting_dir_move {
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
struct orphan_dir_info {

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	int need_later_update;
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}
static void fs_path_reset(struct fs_path *p)
		p->start = p->buf + p->buf_len - 1;
static struct fs_path *fs_path_alloc(void)
	p = kmalloc(sizeof(*p), GFP_NOFS);
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;

static struct fs_path *fs_path_alloc_reversed(void)
static void fs_path_free(struct fs_path *p)
	if (p->buf != p->inline_buf)

static int fs_path_len(struct fs_path *p)
	return p->end - p->start;
static int fs_path_ensure_buf(struct fs_path *p, int len)
	if (p->buf_len >= len)

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf)
		tmp_buf = kmalloc(len, GFP_NOFS);
	else
		tmp_buf = krealloc(p->buf, len, GFP_NOFS);

	/*
	 * The real size of the buffer is bigger, this will let the fast path
	 * happen most of the time
	 */
	p->buf_len = ksize(p->buf);

	tmp_buf = p->buf + old_buf_len - path_len - 1;
	p->end = p->buf + p->buf_len - 1;
	p->start = p->end - path_len;
	memmove(p->start, tmp_buf, path_len + 1);

	p->end = p->start + path_len;
static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
	ret = fs_path_ensure_buf(p, new_len);

	if (p->start != p->end)
		p->start -= name_len;
		*prepared = p->start;
	if (p->start != p->end)
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	memcpy(prepared, name, name_len);

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	memcpy(prepared, p2->start, p2->end - p2->start);
static int fs_path_add_from_extent_buffer(struct fs_path *p,
					   struct extent_buffer *eb,
					   unsigned long off, int len)
	ret = fs_path_prepare_for_add(p, len, &prepared);
	read_extent_buffer(eb, prepared, off, len);

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
	p->reversed = from->reversed;
	ret = fs_path_add_path(p, from);
static void fs_path_unreverse(struct fs_path *p)
	len = p->end - p->start;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
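/*
 * Illustrative sketch of how the fs_path helpers above are typically used
 * (not part of the original file; error handling omitted). Paths built from
 * leaf towards root use the reversed variant and are flipped once at the end:
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *
 *	fs_path_add(p, "file", 4);	p is "file"
 *	fs_path_add(p, "dir", 3);	p is "dir/file"
 *	fs_path_add(p, "a", 1);		p is "a/dir/file"
 *	fs_path_unreverse(p);		buffer now starts at p->buf
 *	fs_path_free(p);
 *
 * The buffer starts as the small inline_buf and is grown on demand by
 * fs_path_ensure_buf() once a path no longer fits.
 */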
static struct btrfs_path *alloc_path_for_send(void)
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	path->search_commit_root = 1;
	path->skip_locking = 1;
static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
	ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
	/* TODO handle that correctly */
	/*if (ret == -ERESTARTSYS) {
static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)
static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
	return tlv_put(sctx, attr, str, len);

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
#define TLV_PUT(sctx, attrtype, attrlen, data) \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
			goto tlv_put_failure; \

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
			goto tlv_put_failure; \

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
		ret = tlv_put_string(sctx, attrtype, str, len); \
			goto tlv_put_failure; \

#define TLV_PUT_PATH(sctx, attrtype, p) \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
			goto tlv_put_failure; \

#define TLV_PUT_UUID(sctx, attrtype, uuid) \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
			goto tlv_put_failure; \

#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
			goto tlv_put_failure; \
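/*
 * Illustrative sketch of the stream layout the helpers above produce (not
 * from the original source; structs are the on-disk little-endian ones):
 *
 *   btrfs_stream_header          magic + version, written once by send_header()
 *   btrfs_cmd_header             len, cmd, crc -- filled in by send_cmd()
 *     btrfs_tlv_header + data    one per TLV_PUT*() call (path, uuid, u64, ...)
 *     btrfs_tlv_header + data
 *   btrfs_cmd_header
 *     ...
 *
 * begin_cmd() reserves room for the command header at the start of send_buf,
 * the TLV_PUT*() macros append attributes behind it, and send_cmd() patches
 * in the final length and CRC before writing the buffer out with write_buf().
 */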
static int send_header(struct send_ctx *sctx)
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);
static int send_cmd(struct send_ctx *sctx)
	struct btrfs_cmd_header *hdr;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);
/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);
/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);
/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);
/*
 * Helper function to retrieve some fields from an inode item.
 */
static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
	struct btrfs_inode_item *ii;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	key.type = BTRFS_INODE_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_inode_item);
	*size = btrfs_inode_size(path->nodes[0], ii);
	*gen = btrfs_inode_generation(path->nodes[0], ii);
	*mode = btrfs_inode_mode(path->nodes[0], ii);
	*uid = btrfs_inode_uid(path->nodes[0], ii);
	*gid = btrfs_inode_gid(path->nodes[0], ii);
	*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	btrfs_free_path(path);
819 typedef int (*iterate_inode_ref_t
)(int num
, u64 dir
, int index
,
/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
831 static int iterate_inode_ref(struct btrfs_root
*root
, struct btrfs_path
*path
,
832 struct btrfs_key
*found_key
, int resolve
,
833 iterate_inode_ref_t iterate
, void *ctx
)
835 struct extent_buffer
*eb
= path
->nodes
[0];
836 struct btrfs_item
*item
;
837 struct btrfs_inode_ref
*iref
;
838 struct btrfs_inode_extref
*extref
;
839 struct btrfs_path
*tmp_path
;
843 int slot
= path
->slots
[0];
850 unsigned long name_off
;
851 unsigned long elem_size
;
854 p
= fs_path_alloc_reversed();
858 tmp_path
= alloc_path_for_send();
865 if (found_key
->type
== BTRFS_INODE_REF_KEY
) {
866 ptr
= (unsigned long)btrfs_item_ptr(eb
, slot
,
867 struct btrfs_inode_ref
);
868 item
= btrfs_item_nr(slot
);
869 total
= btrfs_item_size(eb
, item
);
870 elem_size
= sizeof(*iref
);
872 ptr
= btrfs_item_ptr_offset(eb
, slot
);
873 total
= btrfs_item_size_nr(eb
, slot
);
874 elem_size
= sizeof(*extref
);
877 while (cur
< total
) {
880 if (found_key
->type
== BTRFS_INODE_REF_KEY
) {
881 iref
= (struct btrfs_inode_ref
*)(ptr
+ cur
);
882 name_len
= btrfs_inode_ref_name_len(eb
, iref
);
883 name_off
= (unsigned long)(iref
+ 1);
884 index
= btrfs_inode_ref_index(eb
, iref
);
885 dir
= found_key
->offset
;
887 extref
= (struct btrfs_inode_extref
*)(ptr
+ cur
);
888 name_len
= btrfs_inode_extref_name_len(eb
, extref
);
889 name_off
= (unsigned long)&extref
->name
;
890 index
= btrfs_inode_extref_index(eb
, extref
);
891 dir
= btrfs_inode_extref_parent(eb
, extref
);
895 start
= btrfs_ref_to_path(root
, tmp_path
, name_len
,
899 ret
= PTR_ERR(start
);
902 if (start
< p
->buf
) {
			/* overflow, try again with a larger buffer */
904 ret
= fs_path_ensure_buf(p
,
905 p
->buf_len
+ p
->buf
- start
);
908 start
= btrfs_ref_to_path(root
, tmp_path
,
913 ret
= PTR_ERR(start
);
916 BUG_ON(start
< p
->buf
);
920 ret
= fs_path_add_from_extent_buffer(p
, eb
, name_off
,
926 cur
+= elem_size
+ name_len
;
927 ret
= iterate(num
, dir
, index
, p
, ctx
);
934 btrfs_free_path(tmp_path
);
939 typedef int (*iterate_dir_item_t
)(int num
, struct btrfs_key
*di_key
,
940 const char *name
, int name_len
,
941 const char *data
, int data_len
,
/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
951 static int iterate_dir_item(struct btrfs_root
*root
, struct btrfs_path
*path
,
952 struct btrfs_key
*found_key
,
953 iterate_dir_item_t iterate
, void *ctx
)
956 struct extent_buffer
*eb
;
957 struct btrfs_item
*item
;
958 struct btrfs_dir_item
*di
;
959 struct btrfs_key di_key
;
961 const int buf_len
= PATH_MAX
;
971 buf
= kmalloc(buf_len
, GFP_NOFS
);
978 slot
= path
->slots
[0];
979 item
= btrfs_item_nr(slot
);
980 di
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
983 total
= btrfs_item_size(eb
, item
);
986 while (cur
< total
) {
987 name_len
= btrfs_dir_name_len(eb
, di
);
988 data_len
= btrfs_dir_data_len(eb
, di
);
989 type
= btrfs_dir_type(eb
, di
);
990 btrfs_dir_item_key_to_cpu(eb
, di
, &di_key
);
995 if (name_len
+ data_len
> buf_len
) {
1000 read_extent_buffer(eb
, buf
, (unsigned long)(di
+ 1),
1001 name_len
+ data_len
);
1003 len
= sizeof(*di
) + name_len
+ data_len
;
1004 di
= (struct btrfs_dir_item
*)((char *)di
+ len
);
1007 ret
= iterate(num
, &di_key
, buf
, name_len
, buf
+ name_len
,
1008 data_len
, type
, ctx
);
1024 static int __copy_first_ref(int num
, u64 dir
, int index
,
1025 struct fs_path
*p
, void *ctx
)
1028 struct fs_path
*pt
= ctx
;
1030 ret
= fs_path_copy(pt
, p
);
	/* we want the first only */

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
1042 static int get_inode_path(struct btrfs_root
*root
,
1043 u64 ino
, struct fs_path
*path
)
1046 struct btrfs_key key
, found_key
;
1047 struct btrfs_path
*p
;
1049 p
= alloc_path_for_send();
1053 fs_path_reset(path
);
1056 key
.type
= BTRFS_INODE_REF_KEY
;
1059 ret
= btrfs_search_slot_for_read(root
, &key
, p
, 1, 0);
1066 btrfs_item_key_to_cpu(p
->nodes
[0], &found_key
, p
->slots
[0]);
1067 if (found_key
.objectid
!= ino
||
1068 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
1069 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
)) {
1074 ret
= iterate_inode_ref(root
, p
, &found_key
, 1,
1075 __copy_first_ref
, path
);
1085 struct backref_ctx
{
1086 struct send_ctx
*sctx
;
1088 /* number of total found references */
1092 * used for clones found in send_root. clones found behind cur_objectid
1093 * and cur_offset are not considered as allowed clones.
1098 /* may be truncated in case it's the last extent in a file */
1101 /* Just to check for bugs in backref resolving */
1105 static int __clone_root_cmp_bsearch(const void *key
, const void *elt
)
1107 u64 root
= (u64
)(uintptr_t)key
;
1108 struct clone_root
*cr
= (struct clone_root
*)elt
;
1110 if (root
< cr
->root
->objectid
)
1112 if (root
> cr
->root
->objectid
)
1117 static int __clone_root_cmp_sort(const void *e1
, const void *e2
)
1119 struct clone_root
*cr1
= (struct clone_root
*)e1
;
1120 struct clone_root
*cr2
= (struct clone_root
*)e2
;
1122 if (cr1
->root
->objectid
< cr2
->root
->objectid
)
1124 if (cr1
->root
->objectid
> cr2
->root
->objectid
)
1130 * Called for every backref that is found for the current extent.
1131 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1133 static int __iterate_backrefs(u64 ino
, u64 offset
, u64 root
, void *ctx_
)
1135 struct backref_ctx
*bctx
= ctx_
;
1136 struct clone_root
*found
;
1140 /* First check if the root is in the list of accepted clone sources */
1141 found
= bsearch((void *)(uintptr_t)root
, bctx
->sctx
->clone_roots
,
1142 bctx
->sctx
->clone_roots_cnt
,
1143 sizeof(struct clone_root
),
1144 __clone_root_cmp_bsearch
);
1148 if (found
->root
== bctx
->sctx
->send_root
&&
1149 ino
== bctx
->cur_objectid
&&
1150 offset
== bctx
->cur_offset
) {
1151 bctx
->found_itself
= 1;
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
1158 ret
= get_inode_info(found
->root
, ino
, &i_size
, NULL
, NULL
, NULL
, NULL
,
1163 if (offset
+ bctx
->extent_len
> i_size
)
1167 * Make sure we don't consider clones from send_root that are
1168 * behind the current inode/offset.
1170 if (found
->root
== bctx
->sctx
->send_root
) {
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1177 if (ino
>= bctx
->cur_objectid
)
1180 if (ino
> bctx
->cur_objectid
)
1182 if (offset
+ bctx
->extent_len
> bctx
->cur_offset
)
1188 found
->found_refs
++;
1189 if (ino
< found
->ino
) {
1191 found
->offset
= offset
;
1192 } else if (found
->ino
== ino
) {
		 * same extent found more than once in the same file.
1196 if (found
->offset
> offset
+ bctx
->extent_len
)
1197 found
->offset
= offset
;
/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
 *
 * path must point to the extent item when called.
 */
1212 static int find_extent_clone(struct send_ctx
*sctx
,
1213 struct btrfs_path
*path
,
1214 u64 ino
, u64 data_offset
,
1216 struct clone_root
**found
)
1223 u64 extent_item_pos
;
1225 struct btrfs_file_extent_item
*fi
;
1226 struct extent_buffer
*eb
= path
->nodes
[0];
1227 struct backref_ctx
*backref_ctx
= NULL
;
1228 struct clone_root
*cur_clone_root
;
1229 struct btrfs_key found_key
;
1230 struct btrfs_path
*tmp_path
;
1234 tmp_path
= alloc_path_for_send();
1238 backref_ctx
= kmalloc(sizeof(*backref_ctx
), GFP_NOFS
);
1244 if (data_offset
>= ino_size
) {
1246 * There may be extents that lie behind the file's size.
1247 * I at least had this in combination with snapshotting while
1248 * writing large files.
1254 fi
= btrfs_item_ptr(eb
, path
->slots
[0],
1255 struct btrfs_file_extent_item
);
1256 extent_type
= btrfs_file_extent_type(eb
, fi
);
1257 if (extent_type
== BTRFS_FILE_EXTENT_INLINE
) {
1261 compressed
= btrfs_file_extent_compression(eb
, fi
);
1263 num_bytes
= btrfs_file_extent_num_bytes(eb
, fi
);
1264 disk_byte
= btrfs_file_extent_disk_bytenr(eb
, fi
);
1265 if (disk_byte
== 0) {
1269 logical
= disk_byte
+ btrfs_file_extent_offset(eb
, fi
);
1271 ret
= extent_from_logical(sctx
->send_root
->fs_info
, disk_byte
, tmp_path
,
1272 &found_key
, &flags
);
1273 btrfs_release_path(tmp_path
);
1277 if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
1283 * Setup the clone roots.
1285 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++) {
1286 cur_clone_root
= sctx
->clone_roots
+ i
;
1287 cur_clone_root
->ino
= (u64
)-1;
1288 cur_clone_root
->offset
= 0;
1289 cur_clone_root
->found_refs
= 0;
1292 backref_ctx
->sctx
= sctx
;
1293 backref_ctx
->found
= 0;
1294 backref_ctx
->cur_objectid
= ino
;
1295 backref_ctx
->cur_offset
= data_offset
;
1296 backref_ctx
->found_itself
= 0;
1297 backref_ctx
->extent_len
= num_bytes
;
1300 * The last extent of a file may be too large due to page alignment.
1301 * We need to adjust extent_len in this case so that the checks in
1302 * __iterate_backrefs work.
1304 if (data_offset
+ num_bytes
>= ino_size
)
1305 backref_ctx
->extent_len
= ino_size
- data_offset
;
1308 * Now collect all backrefs.
1310 if (compressed
== BTRFS_COMPRESS_NONE
)
1311 extent_item_pos
= logical
- found_key
.objectid
;
1313 extent_item_pos
= 0;
1314 ret
= iterate_extent_inodes(sctx
->send_root
->fs_info
,
1315 found_key
.objectid
, extent_item_pos
, 1,
1316 __iterate_backrefs
, backref_ctx
);
1321 if (!backref_ctx
->found_itself
) {
1322 /* found a bug in backref code? */
1324 btrfs_err(sctx
->send_root
->fs_info
, "did not find backref in "
1325 "send_root. inode=%llu, offset=%llu, "
1326 "disk_byte=%llu found extent=%llu\n",
1327 ino
, data_offset
, disk_byte
, found_key
.objectid
);
1331 verbose_printk(KERN_DEBUG
"btrfs: find_extent_clone: data_offset=%llu, "
1333 "num_bytes=%llu, logical=%llu\n",
1334 data_offset
, ino
, num_bytes
, logical
);
1336 if (!backref_ctx
->found
)
1337 verbose_printk("btrfs: no clones found\n");
1339 cur_clone_root
= NULL
;
1340 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++) {
1341 if (sctx
->clone_roots
[i
].found_refs
) {
1342 if (!cur_clone_root
)
1343 cur_clone_root
= sctx
->clone_roots
+ i
;
1344 else if (sctx
->clone_roots
[i
].root
== sctx
->send_root
)
1345 /* prefer clones from send_root over others */
1346 cur_clone_root
= sctx
->clone_roots
+ i
;
1351 if (cur_clone_root
) {
1352 if (compressed
!= BTRFS_COMPRESS_NONE
) {
1354 * Offsets given by iterate_extent_inodes() are relative
1355 * to the start of the extent, we need to add logical
1356 * offset from the file extent item.
1357 * (See why at backref.c:check_extent_in_eb())
1359 cur_clone_root
->offset
+= btrfs_file_extent_offset(eb
,
1362 *found
= cur_clone_root
;
1369 btrfs_free_path(tmp_path
);
1374 static int read_symlink(struct btrfs_root
*root
,
1376 struct fs_path
*dest
)
1379 struct btrfs_path
*path
;
1380 struct btrfs_key key
;
1381 struct btrfs_file_extent_item
*ei
;
1387 path
= alloc_path_for_send();
1392 key
.type
= BTRFS_EXTENT_DATA_KEY
;
1394 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
1399 ei
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1400 struct btrfs_file_extent_item
);
1401 type
= btrfs_file_extent_type(path
->nodes
[0], ei
);
1402 compression
= btrfs_file_extent_compression(path
->nodes
[0], ei
);
1403 BUG_ON(type
!= BTRFS_FILE_EXTENT_INLINE
);
1404 BUG_ON(compression
);
1406 off
= btrfs_file_extent_inline_start(ei
);
1407 len
= btrfs_file_extent_inline_len(path
->nodes
[0], path
->slots
[0], ei
);
1409 ret
= fs_path_add_from_extent_buffer(dest
, path
->nodes
[0], off
, len
);
1412 btrfs_free_path(path
);
1417 * Helper function to generate a file name that is unique in the root of
1418 * send_root and parent_root. This is used to generate names for orphan inodes.
1420 static int gen_unique_name(struct send_ctx
*sctx
,
1422 struct fs_path
*dest
)
1425 struct btrfs_path
*path
;
1426 struct btrfs_dir_item
*di
;
1431 path
= alloc_path_for_send();
1436 len
= snprintf(tmp
, sizeof(tmp
), "o%llu-%llu-%llu",
1438 ASSERT(len
< sizeof(tmp
));
1440 di
= btrfs_lookup_dir_item(NULL
, sctx
->send_root
,
1441 path
, BTRFS_FIRST_FREE_OBJECTID
,
1442 tmp
, strlen(tmp
), 0);
1443 btrfs_release_path(path
);
1449 /* not unique, try again */
1454 if (!sctx
->parent_root
) {
1460 di
= btrfs_lookup_dir_item(NULL
, sctx
->parent_root
,
1461 path
, BTRFS_FIRST_FREE_OBJECTID
,
1462 tmp
, strlen(tmp
), 0);
1463 btrfs_release_path(path
);
1469 /* not unique, try again */
1477 ret
= fs_path_add(dest
, tmp
, strlen(tmp
));
1480 btrfs_free_path(path
);
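/*
 * Example of the names generated above (illustrative, not from the original
 * source): for inode 259 with generation 7 the candidates tried are
 * "o259-7-0", "o259-7-1", ... until one is free in the top level directory
 * of both the send root and, if present, the parent root.
 */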
1485 inode_state_no_change
,
1486 inode_state_will_create
,
1487 inode_state_did_create
,
1488 inode_state_will_delete
,
1489 inode_state_did_delete
,
1492 static int get_cur_inode_state(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1500 ret
= get_inode_info(sctx
->send_root
, ino
, NULL
, &left_gen
, NULL
, NULL
,
1502 if (ret
< 0 && ret
!= -ENOENT
)
1506 if (!sctx
->parent_root
) {
1507 right_ret
= -ENOENT
;
1509 ret
= get_inode_info(sctx
->parent_root
, ino
, NULL
, &right_gen
,
1510 NULL
, NULL
, NULL
, NULL
);
1511 if (ret
< 0 && ret
!= -ENOENT
)
1516 if (!left_ret
&& !right_ret
) {
1517 if (left_gen
== gen
&& right_gen
== gen
) {
1518 ret
= inode_state_no_change
;
1519 } else if (left_gen
== gen
) {
1520 if (ino
< sctx
->send_progress
)
1521 ret
= inode_state_did_create
;
1523 ret
= inode_state_will_create
;
1524 } else if (right_gen
== gen
) {
1525 if (ino
< sctx
->send_progress
)
1526 ret
= inode_state_did_delete
;
1528 ret
= inode_state_will_delete
;
1532 } else if (!left_ret
) {
1533 if (left_gen
== gen
) {
1534 if (ino
< sctx
->send_progress
)
1535 ret
= inode_state_did_create
;
1537 ret
= inode_state_will_create
;
1541 } else if (!right_ret
) {
1542 if (right_gen
== gen
) {
1543 if (ino
< sctx
->send_progress
)
1544 ret
= inode_state_did_delete
;
1546 ret
= inode_state_will_delete
;
1558 static int is_inode_existent(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1562 ret
= get_cur_inode_state(sctx
, ino
, gen
);
1566 if (ret
== inode_state_no_change
||
1567 ret
== inode_state_did_create
||
1568 ret
== inode_state_will_delete
)
1578 * Helper function to lookup a dir item in a dir.
1580 static int lookup_dir_item_inode(struct btrfs_root
*root
,
1581 u64 dir
, const char *name
, int name_len
,
1586 struct btrfs_dir_item
*di
;
1587 struct btrfs_key key
;
1588 struct btrfs_path
*path
;
1590 path
= alloc_path_for_send();
1594 di
= btrfs_lookup_dir_item(NULL
, root
, path
,
1595 dir
, name
, name_len
, 0);
1604 btrfs_dir_item_key_to_cpu(path
->nodes
[0], di
, &key
);
1605 *found_inode
= key
.objectid
;
1606 *found_type
= btrfs_dir_type(path
->nodes
[0], di
);
1609 btrfs_free_path(path
);
1614 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1615 * generation of the parent dir and the name of the dir entry.
1617 static int get_first_ref(struct btrfs_root
*root
, u64 ino
,
1618 u64
*dir
, u64
*dir_gen
, struct fs_path
*name
)
1621 struct btrfs_key key
;
1622 struct btrfs_key found_key
;
1623 struct btrfs_path
*path
;
1627 path
= alloc_path_for_send();
1632 key
.type
= BTRFS_INODE_REF_KEY
;
1635 ret
= btrfs_search_slot_for_read(root
, &key
, path
, 1, 0);
1639 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
1641 if (ret
|| found_key
.objectid
!= ino
||
1642 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
1643 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
)) {
1648 if (key
.type
== BTRFS_INODE_REF_KEY
) {
1649 struct btrfs_inode_ref
*iref
;
1650 iref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1651 struct btrfs_inode_ref
);
1652 len
= btrfs_inode_ref_name_len(path
->nodes
[0], iref
);
1653 ret
= fs_path_add_from_extent_buffer(name
, path
->nodes
[0],
1654 (unsigned long)(iref
+ 1),
1656 parent_dir
= found_key
.offset
;
1658 struct btrfs_inode_extref
*extref
;
1659 extref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1660 struct btrfs_inode_extref
);
1661 len
= btrfs_inode_extref_name_len(path
->nodes
[0], extref
);
1662 ret
= fs_path_add_from_extent_buffer(name
, path
->nodes
[0],
1663 (unsigned long)&extref
->name
, len
);
1664 parent_dir
= btrfs_inode_extref_parent(path
->nodes
[0], extref
);
1668 btrfs_release_path(path
);
1670 ret
= get_inode_info(root
, parent_dir
, NULL
, dir_gen
, NULL
, NULL
,
1678 btrfs_free_path(path
);
1682 static int is_first_ref(struct btrfs_root
*root
,
1684 const char *name
, int name_len
)
1687 struct fs_path
*tmp_name
;
1691 tmp_name
= fs_path_alloc();
1695 ret
= get_first_ref(root
, ino
, &tmp_dir
, &tmp_dir_gen
, tmp_name
);
1699 if (dir
!= tmp_dir
|| name_len
!= fs_path_len(tmp_name
)) {
1704 ret
= !memcmp(tmp_name
->start
, name
, name_len
);
1707 fs_path_free(tmp_name
);
1712 * Used by process_recorded_refs to determine if a new ref would overwrite an
1713 * already existing ref. In case it detects an overwrite, it returns the
1714 * inode/gen in who_ino/who_gen.
1715 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1716 * to make sure later references to the overwritten inode are possible.
1717 * Orphanizing is however only required for the first ref of an inode.
1718 * process_recorded_refs does an additional is_first_ref check to see if
1719 * orphanizing is really required.
1721 static int will_overwrite_ref(struct send_ctx
*sctx
, u64 dir
, u64 dir_gen
,
1722 const char *name
, int name_len
,
1723 u64
*who_ino
, u64
*who_gen
)
1727 u64 other_inode
= 0;
1730 if (!sctx
->parent_root
)
1733 ret
= is_inode_existent(sctx
, dir
, dir_gen
);
	 * If we have a parent root, we need to verify that the parent dir was
	 * not deleted and then re-created; if it was, then we have no overwrite
	 * and we can just unlink this entry.
1742 if (sctx
->parent_root
) {
1743 ret
= get_inode_info(sctx
->parent_root
, dir
, NULL
, &gen
, NULL
,
1745 if (ret
< 0 && ret
!= -ENOENT
)
1755 ret
= lookup_dir_item_inode(sctx
->parent_root
, dir
, name
, name_len
,
1756 &other_inode
, &other_type
);
1757 if (ret
< 0 && ret
!= -ENOENT
)
1765 * Check if the overwritten ref was already processed. If yes, the ref
1766 * was already unlinked/moved, so we can safely assume that we will not
1767 * overwrite anything at this point in time.
1769 if (other_inode
> sctx
->send_progress
) {
1770 ret
= get_inode_info(sctx
->parent_root
, other_inode
, NULL
,
1771 who_gen
, NULL
, NULL
, NULL
, NULL
);
1776 *who_ino
= other_inode
;
/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
1792 static int did_overwrite_ref(struct send_ctx
*sctx
,
1793 u64 dir
, u64 dir_gen
,
1794 u64 ino
, u64 ino_gen
,
1795 const char *name
, int name_len
)
1802 if (!sctx
->parent_root
)
1805 ret
= is_inode_existent(sctx
, dir
, dir_gen
);
1809 /* check if the ref was overwritten by another ref */
1810 ret
= lookup_dir_item_inode(sctx
->send_root
, dir
, name
, name_len
,
1811 &ow_inode
, &other_type
);
1812 if (ret
< 0 && ret
!= -ENOENT
)
1815 /* was never and will never be overwritten */
1820 ret
= get_inode_info(sctx
->send_root
, ow_inode
, NULL
, &gen
, NULL
, NULL
,
1825 if (ow_inode
== ino
&& gen
== ino_gen
) {
1830 /* we know that it is or will be overwritten. check this now */
1831 if (ow_inode
< sctx
->send_progress
)
1841 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1842 * that got overwritten. This is used by process_recorded_refs to determine
1843 * if it has to use the path as returned by get_cur_path or the orphan name.
1845 static int did_overwrite_first_ref(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1848 struct fs_path
*name
= NULL
;
1852 if (!sctx
->parent_root
)
1855 name
= fs_path_alloc();
1859 ret
= get_first_ref(sctx
->parent_root
, ino
, &dir
, &dir_gen
, name
);
1863 ret
= did_overwrite_ref(sctx
, dir
, dir_gen
, ino
, gen
,
1864 name
->start
, fs_path_len(name
));
1872 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
1873 * so we need to do some special handling in case we have clashes. This function
1874 * takes care of this with the help of name_cache_entry::radix_list.
1875 * In case of error, nce is kfreed.
1877 static int name_cache_insert(struct send_ctx
*sctx
,
1878 struct name_cache_entry
*nce
)
1881 struct list_head
*nce_head
;
1883 nce_head
= radix_tree_lookup(&sctx
->name_cache
,
1884 (unsigned long)nce
->ino
);
1886 nce_head
= kmalloc(sizeof(*nce_head
), GFP_NOFS
);
1891 INIT_LIST_HEAD(nce_head
);
1893 ret
= radix_tree_insert(&sctx
->name_cache
, nce
->ino
, nce_head
);
1900 list_add_tail(&nce
->radix_list
, nce_head
);
1901 list_add_tail(&nce
->list
, &sctx
->name_cache_list
);
1902 sctx
->name_cache_size
++;
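/*
 * Illustrative sketch (not from the original source): on a 32-bit radix
 * tree index, two different inodes can map to the same slot, e.g.
 *
 *   inum 0x00000001_00000100 -> index 0x00000100
 *   inum 0x00000002_00000100 -> index 0x00000100
 *
 * Both entries then hang off the same per-slot list head via radix_list,
 * and name_cache_search() walks that list comparing the full 64-bit ino
 * (and the generation) to find the right entry.
 */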
1907 static void name_cache_delete(struct send_ctx
*sctx
,
1908 struct name_cache_entry
*nce
)
1910 struct list_head
*nce_head
;
1912 nce_head
= radix_tree_lookup(&sctx
->name_cache
,
1913 (unsigned long)nce
->ino
);
1915 btrfs_err(sctx
->send_root
->fs_info
,
1916 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
1917 nce
->ino
, sctx
->name_cache_size
);
1920 list_del(&nce
->radix_list
);
1921 list_del(&nce
->list
);
1922 sctx
->name_cache_size
--;
1925 * We may not get to the final release of nce_head if the lookup fails
1927 if (nce_head
&& list_empty(nce_head
)) {
1928 radix_tree_delete(&sctx
->name_cache
, (unsigned long)nce
->ino
);
1933 static struct name_cache_entry
*name_cache_search(struct send_ctx
*sctx
,
1936 struct list_head
*nce_head
;
1937 struct name_cache_entry
*cur
;
1939 nce_head
= radix_tree_lookup(&sctx
->name_cache
, (unsigned long)ino
);
1943 list_for_each_entry(cur
, nce_head
, radix_list
) {
1944 if (cur
->ino
== ino
&& cur
->gen
== gen
)
1951 * Removes the entry from the list and adds it back to the end. This marks the
1952 * entry as recently used so that name_cache_clean_unused does not remove it.
1954 static void name_cache_used(struct send_ctx
*sctx
, struct name_cache_entry
*nce
)
1956 list_del(&nce
->list
);
1957 list_add_tail(&nce
->list
, &sctx
->name_cache_list
);
1961 * Remove some entries from the beginning of name_cache_list.
1963 static void name_cache_clean_unused(struct send_ctx
*sctx
)
1965 struct name_cache_entry
*nce
;
1967 if (sctx
->name_cache_size
< SEND_CTX_NAME_CACHE_CLEAN_SIZE
)
1970 while (sctx
->name_cache_size
> SEND_CTX_MAX_NAME_CACHE_SIZE
) {
1971 nce
= list_entry(sctx
->name_cache_list
.next
,
1972 struct name_cache_entry
, list
);
1973 name_cache_delete(sctx
, nce
);
1978 static void name_cache_free(struct send_ctx
*sctx
)
1980 struct name_cache_entry
*nce
;
1982 while (!list_empty(&sctx
->name_cache_list
)) {
1983 nce
= list_entry(sctx
->name_cache_list
.next
,
1984 struct name_cache_entry
, list
);
1985 name_cache_delete(sctx
, nce
);
/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode does not exist or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
1998 static int __get_cur_name_and_parent(struct send_ctx
*sctx
,
2002 struct fs_path
*dest
)
2006 struct btrfs_path
*path
= NULL
;
2007 struct name_cache_entry
*nce
= NULL
;
2010 * First check if we already did a call to this function with the same
2011 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
2012 * return the cached result.
2014 nce
= name_cache_search(sctx
, ino
, gen
);
2016 if (ino
< sctx
->send_progress
&& nce
->need_later_update
) {
2017 name_cache_delete(sctx
, nce
);
2021 name_cache_used(sctx
, nce
);
2022 *parent_ino
= nce
->parent_ino
;
2023 *parent_gen
= nce
->parent_gen
;
2024 ret
= fs_path_add(dest
, nce
->name
, nce
->name_len
);
2032 path
= alloc_path_for_send();
	 * If the inode does not exist yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
2041 ret
= is_inode_existent(sctx
, ino
, gen
);
2046 ret
= gen_unique_name(sctx
, ino
, gen
, dest
);
2054 * Depending on whether the inode was already processed or not, use
2055 * send_root or parent_root for ref lookup.
2057 if (ino
< sctx
->send_progress
)
2058 ret
= get_first_ref(sctx
->send_root
, ino
,
2059 parent_ino
, parent_gen
, dest
);
2061 ret
= get_first_ref(sctx
->parent_root
, ino
,
2062 parent_ino
, parent_gen
, dest
);
2067 * Check if the ref was overwritten by an inode's ref that was processed
2068 * earlier. If yes, treat as orphan and return 1.
2070 ret
= did_overwrite_ref(sctx
, *parent_ino
, *parent_gen
, ino
, gen
,
2071 dest
->start
, dest
->end
- dest
->start
);
2075 fs_path_reset(dest
);
2076 ret
= gen_unique_name(sctx
, ino
, gen
, dest
);
2084 * Store the result of the lookup in the name cache.
2086 nce
= kmalloc(sizeof(*nce
) + fs_path_len(dest
) + 1, GFP_NOFS
);
2094 nce
->parent_ino
= *parent_ino
;
2095 nce
->parent_gen
= *parent_gen
;
2096 nce
->name_len
= fs_path_len(dest
);
2098 strcpy(nce
->name
, dest
->start
);
2100 if (ino
< sctx
->send_progress
)
2101 nce
->need_later_update
= 0;
2103 nce
->need_later_update
= 1;
2105 nce_ret
= name_cache_insert(sctx
, nce
);
2108 name_cache_clean_unused(sctx
);
2111 btrfs_free_path(path
);
2116 * Magic happens here. This function returns the first ref to an inode as it
2117 * would look like while receiving the stream at this point in time.
2118 * We walk the path up to the root. For every inode in between, we check if it
2119 * was already processed/sent. If yes, we continue with the parent as found
2120 * in send_root. If not, we continue with the parent as found in parent_root.
2121 * If we encounter an inode that was deleted at this point in time, we use the
2122 * inodes "orphan" name instead of the real name and stop. Same with new inodes
2123 * that were not created yet and overwritten inodes/refs.
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
2128 * inside which were not processed yet (pending for move/delete). If anyone
2129 * tried to get the path to the dir items, it would get a path inside that
2131 * 3. When an inode is moved around or gets new links, it may overwrite the ref
2132 * of an unprocessed inode. If in that case the first ref would be
2133 * overwritten, the overwritten inode gets "orphanized". Later when we
2134 * process this overwritten inode, it is restored at a new place by moving
2137 * sctx->send_progress tells this function at which point in time receiving
2140 static int get_cur_path(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2141 struct fs_path
*dest
)
2144 struct fs_path
*name
= NULL
;
2145 u64 parent_inode
= 0;
2149 name
= fs_path_alloc();
2156 fs_path_reset(dest
);
2158 while (!stop
&& ino
!= BTRFS_FIRST_FREE_OBJECTID
) {
2159 fs_path_reset(name
);
2161 if (is_waiting_for_rm(sctx
, ino
)) {
2162 ret
= gen_unique_name(sctx
, ino
, gen
, name
);
2165 ret
= fs_path_add_path(dest
, name
);
2169 if (is_waiting_for_move(sctx
, ino
)) {
2170 ret
= get_first_ref(sctx
->parent_root
, ino
,
2171 &parent_inode
, &parent_gen
, name
);
2173 ret
= __get_cur_name_and_parent(sctx
, ino
, gen
,
2183 ret
= fs_path_add_path(dest
, name
);
2194 fs_path_unreverse(dest
);
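/*
 * Illustrative walk (not from the original source, inode numbers made up):
 * suppose ino 260 is "foo" inside directory 258 "dir", which lives in the
 * subvolume root (256). get_cur_path() builds the reversed path as:
 *
 *   step 1: ino 260 -> name "foo", parent 258
 *   step 2: ino 258 -> name "dir", parent 256 (BTRFS_FIRST_FREE_OBJECTID)
 *
 * giving "dir/foo" after fs_path_unreverse(). If 258 had been deleted or not
 * yet created at the current send_progress, step 2 would have used its
 * orphan name (see gen_unique_name()) instead and stopped there.
 */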
2199 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2201 static int send_subvol_begin(struct send_ctx
*sctx
)
2204 struct btrfs_root
*send_root
= sctx
->send_root
;
2205 struct btrfs_root
*parent_root
= sctx
->parent_root
;
2206 struct btrfs_path
*path
;
2207 struct btrfs_key key
;
2208 struct btrfs_root_ref
*ref
;
2209 struct extent_buffer
*leaf
;
2213 path
= btrfs_alloc_path();
2217 name
= kmalloc(BTRFS_PATH_NAME_MAX
, GFP_NOFS
);
2219 btrfs_free_path(path
);
2223 key
.objectid
= send_root
->objectid
;
2224 key
.type
= BTRFS_ROOT_BACKREF_KEY
;
2227 ret
= btrfs_search_slot_for_read(send_root
->fs_info
->tree_root
,
2236 leaf
= path
->nodes
[0];
2237 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2238 if (key
.type
!= BTRFS_ROOT_BACKREF_KEY
||
2239 key
.objectid
!= send_root
->objectid
) {
2243 ref
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_root_ref
);
2244 namelen
= btrfs_root_ref_name_len(leaf
, ref
);
2245 read_extent_buffer(leaf
, name
, (unsigned long)(ref
+ 1), namelen
);
2246 btrfs_release_path(path
);
2249 ret
= begin_cmd(sctx
, BTRFS_SEND_C_SNAPSHOT
);
2253 ret
= begin_cmd(sctx
, BTRFS_SEND_C_SUBVOL
);
2258 TLV_PUT_STRING(sctx
, BTRFS_SEND_A_PATH
, name
, namelen
);
2259 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_UUID
,
2260 sctx
->send_root
->root_item
.uuid
);
2261 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CTRANSID
,
2262 le64_to_cpu(sctx
->send_root
->root_item
.ctransid
));
2264 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_CLONE_UUID
,
2265 sctx
->parent_root
->root_item
.uuid
);
2266 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_CTRANSID
,
2267 le64_to_cpu(sctx
->parent_root
->root_item
.ctransid
));
2270 ret
= send_cmd(sctx
);
2274 btrfs_free_path(path
);
2279 static int send_truncate(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 size
)
2284 verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino
, size
);
2286 p
= fs_path_alloc();
2290 ret
= begin_cmd(sctx
, BTRFS_SEND_C_TRUNCATE
);
2294 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2297 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2298 TLV_PUT_U64(sctx
, BTRFS_SEND_A_SIZE
, size
);
2300 ret
= send_cmd(sctx
);
2308 static int send_chmod(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 mode
)
2313 verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino
, mode
);
2315 p
= fs_path_alloc();
2319 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CHMOD
);
2323 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2326 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2327 TLV_PUT_U64(sctx
, BTRFS_SEND_A_MODE
, mode
& 07777);
2329 ret
= send_cmd(sctx
);
2337 static int send_chown(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 uid
, u64 gid
)
2342 verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino
, uid
, gid
);
2344 p
= fs_path_alloc();
2348 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CHOWN
);
2352 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2355 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2356 TLV_PUT_U64(sctx
, BTRFS_SEND_A_UID
, uid
);
2357 TLV_PUT_U64(sctx
, BTRFS_SEND_A_GID
, gid
);
2359 ret
= send_cmd(sctx
);
2367 static int send_utimes(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
2370 struct fs_path
*p
= NULL
;
2371 struct btrfs_inode_item
*ii
;
2372 struct btrfs_path
*path
= NULL
;
2373 struct extent_buffer
*eb
;
2374 struct btrfs_key key
;
2377 verbose_printk("btrfs: send_utimes %llu\n", ino
);
2379 p
= fs_path_alloc();
2383 path
= alloc_path_for_send();
2390 key
.type
= BTRFS_INODE_ITEM_KEY
;
2392 ret
= btrfs_search_slot(NULL
, sctx
->send_root
, &key
, path
, 0, 0);
2396 eb
= path
->nodes
[0];
2397 slot
= path
->slots
[0];
2398 ii
= btrfs_item_ptr(eb
, slot
, struct btrfs_inode_item
);
2400 ret
= begin_cmd(sctx
, BTRFS_SEND_C_UTIMES
);
2404 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2407 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2408 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_ATIME
, eb
,
2409 btrfs_inode_atime(ii
));
2410 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_MTIME
, eb
,
2411 btrfs_inode_mtime(ii
));
2412 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_CTIME
, eb
,
2413 btrfs_inode_ctime(ii
));
2414 /* TODO Add otime support when the otime patches get into upstream */
2416 ret
= send_cmd(sctx
);
2421 btrfs_free_path(path
);
/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as an orphan.
 */
2430 static int send_create_inode(struct send_ctx
*sctx
, u64 ino
)
2439 verbose_printk("btrfs: send_create_inode %llu\n", ino
);
2441 p
= fs_path_alloc();
2445 if (ino
!= sctx
->cur_ino
) {
2446 ret
= get_inode_info(sctx
->send_root
, ino
, NULL
, &gen
, &mode
,
2451 gen
= sctx
->cur_inode_gen
;
2452 mode
= sctx
->cur_inode_mode
;
2453 rdev
= sctx
->cur_inode_rdev
;
2456 if (S_ISREG(mode
)) {
2457 cmd
= BTRFS_SEND_C_MKFILE
;
2458 } else if (S_ISDIR(mode
)) {
2459 cmd
= BTRFS_SEND_C_MKDIR
;
2460 } else if (S_ISLNK(mode
)) {
2461 cmd
= BTRFS_SEND_C_SYMLINK
;
2462 } else if (S_ISCHR(mode
) || S_ISBLK(mode
)) {
2463 cmd
= BTRFS_SEND_C_MKNOD
;
2464 } else if (S_ISFIFO(mode
)) {
2465 cmd
= BTRFS_SEND_C_MKFIFO
;
2466 } else if (S_ISSOCK(mode
)) {
2467 cmd
= BTRFS_SEND_C_MKSOCK
;
2469 printk(KERN_WARNING
"btrfs: unexpected inode type %o",
2470 (int)(mode
& S_IFMT
));
2475 ret
= begin_cmd(sctx
, cmd
);
2479 ret
= gen_unique_name(sctx
, ino
, gen
, p
);
2483 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2484 TLV_PUT_U64(sctx
, BTRFS_SEND_A_INO
, ino
);
2486 if (S_ISLNK(mode
)) {
2488 ret
= read_symlink(sctx
->send_root
, ino
, p
);
2491 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_LINK
, p
);
2492 } else if (S_ISCHR(mode
) || S_ISBLK(mode
) ||
2493 S_ISFIFO(mode
) || S_ISSOCK(mode
)) {
2494 TLV_PUT_U64(sctx
, BTRFS_SEND_A_RDEV
, new_encode_dev(rdev
));
2495 TLV_PUT_U64(sctx
, BTRFS_SEND_A_MODE
, mode
);
2498 ret
= send_cmd(sctx
);
2510 * We need some special handling for inodes that get processed before the parent
2511 * directory got created. See process_recorded_refs for details.
2512 * This function does the check if we already created the dir out of order.
2514 static int did_create_dir(struct send_ctx
*sctx
, u64 dir
)
2517 struct btrfs_path
*path
= NULL
;
2518 struct btrfs_key key
;
2519 struct btrfs_key found_key
;
2520 struct btrfs_key di_key
;
2521 struct extent_buffer
*eb
;
2522 struct btrfs_dir_item
*di
;
2525 path
= alloc_path_for_send();
2532 key
.type
= BTRFS_DIR_INDEX_KEY
;
2534 ret
= btrfs_search_slot(NULL
, sctx
->send_root
, &key
, path
, 0, 0);
2539 eb
= path
->nodes
[0];
2540 slot
= path
->slots
[0];
2541 if (slot
>= btrfs_header_nritems(eb
)) {
2542 ret
= btrfs_next_leaf(sctx
->send_root
, path
);
2545 } else if (ret
> 0) {
2552 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
2553 if (found_key
.objectid
!= key
.objectid
||
2554 found_key
.type
!= key
.type
) {
2559 di
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
2560 btrfs_dir_item_key_to_cpu(eb
, di
, &di_key
);
2562 if (di_key
.type
!= BTRFS_ROOT_ITEM_KEY
&&
2563 di_key
.objectid
< sctx
->send_progress
) {
2572 btrfs_free_path(path
);
2577 * Only creates the inode if it is:
2578 * 1. Not a directory
2579 * 2. Or a directory which was not created already due to out of order
2580 * directories. See did_create_dir and process_recorded_refs for details.
2582 static int send_create_inode_if_needed(struct send_ctx
*sctx
)
2586 if (S_ISDIR(sctx
->cur_inode_mode
)) {
2587 ret
= did_create_dir(sctx
, sctx
->cur_ino
);
2596 ret
= send_create_inode(sctx
, sctx
->cur_ino
);
2604 struct recorded_ref
{
2605 struct list_head list
;
2608 struct fs_path
*full_path
;
2616 * We need to process new refs before deleted refs, but compare_tree gives us
2617 * everything mixed. So we first record all refs and later process them.
2618 * This function is a helper to record one ref.
2620 static int __record_ref(struct list_head
*head
, u64 dir
,
2621 u64 dir_gen
, struct fs_path
*path
)
2623 struct recorded_ref
*ref
;
2625 ref
= kmalloc(sizeof(*ref
), GFP_NOFS
);
2630 ref
->dir_gen
= dir_gen
;
2631 ref
->full_path
= path
;
2633 ref
->name
= (char *)kbasename(ref
->full_path
->start
);
2634 ref
->name_len
= ref
->full_path
->end
- ref
->name
;
2635 ref
->dir_path
= ref
->full_path
->start
;
2636 if (ref
->name
== ref
->full_path
->start
)
2637 ref
->dir_path_len
= 0;
2639 ref
->dir_path_len
= ref
->full_path
->end
-
2640 ref
->full_path
->start
- 1 - ref
->name_len
;
2642 list_add_tail(&ref
->list
, head
);
2646 static int dup_ref(struct recorded_ref
*ref
, struct list_head
*list
)
2648 struct recorded_ref
*new;
2650 new = kmalloc(sizeof(*ref
), GFP_NOFS
);
2654 new->dir
= ref
->dir
;
2655 new->dir_gen
= ref
->dir_gen
;
2656 new->full_path
= NULL
;
2657 INIT_LIST_HEAD(&new->list
);
2658 list_add_tail(&new->list
, list
);
2662 static void __free_recorded_refs(struct list_head
*head
)
2664 struct recorded_ref
*cur
;
2666 while (!list_empty(head
)) {
2667 cur
= list_entry(head
->next
, struct recorded_ref
, list
);
2668 fs_path_free(cur
->full_path
);
2669 list_del(&cur
->list
);
2674 static void free_recorded_refs(struct send_ctx
*sctx
)
2676 __free_recorded_refs(&sctx
->new_refs
);
2677 __free_recorded_refs(&sctx
->deleted_refs
);
/*
 * Renames/moves a file/dir to its orphan name. Used when the first
 * ref of an unprocessed inode gets overwritten and for all non-empty
2685 static int orphanize_inode(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2686 struct fs_path
*path
)
2689 struct fs_path
*orphan
;
2691 orphan
= fs_path_alloc();
2695 ret
= gen_unique_name(sctx
, ino
, gen
, orphan
);
2699 ret
= send_rename(sctx
, path
, orphan
);
2702 fs_path_free(orphan
);
2706 static struct orphan_dir_info
*
2707 add_orphan_dir_info(struct send_ctx
*sctx
, u64 dir_ino
)
2709 struct rb_node
**p
= &sctx
->orphan_dirs
.rb_node
;
2710 struct rb_node
*parent
= NULL
;
2711 struct orphan_dir_info
*entry
, *odi
;
2713 odi
= kmalloc(sizeof(*odi
), GFP_NOFS
);
2715 return ERR_PTR(-ENOMEM
);
2721 entry
= rb_entry(parent
, struct orphan_dir_info
, node
);
2722 if (dir_ino
< entry
->ino
) {
2724 } else if (dir_ino
> entry
->ino
) {
2725 p
= &(*p
)->rb_right
;
2732 rb_link_node(&odi
->node
, parent
, p
);
2733 rb_insert_color(&odi
->node
, &sctx
->orphan_dirs
);
2737 static struct orphan_dir_info
*
2738 get_orphan_dir_info(struct send_ctx
*sctx
, u64 dir_ino
)
2740 struct rb_node
*n
= sctx
->orphan_dirs
.rb_node
;
2741 struct orphan_dir_info
*entry
;
2744 entry
= rb_entry(n
, struct orphan_dir_info
, node
);
2745 if (dir_ino
< entry
->ino
)
2747 else if (dir_ino
> entry
->ino
)
2755 static int is_waiting_for_rm(struct send_ctx
*sctx
, u64 dir_ino
)
2757 struct orphan_dir_info
*odi
= get_orphan_dir_info(sctx
, dir_ino
);
2762 static void free_orphan_dir_info(struct send_ctx
*sctx
,
2763 struct orphan_dir_info
*odi
)
2767 rb_erase(&odi
->node
, &sctx
->orphan_dirs
);
2772 * Returns 1 if a directory can be removed at this point in time.
2773 * We check this by iterating all dir items and checking if the inode behind
2774 * the dir item was already processed.
2776 static int can_rmdir(struct send_ctx
*sctx
, u64 dir
, u64 dir_gen
,
2780 struct btrfs_root
*root
= sctx
->parent_root
;
2781 struct btrfs_path
*path
;
2782 struct btrfs_key key
;
2783 struct btrfs_key found_key
;
2784 struct btrfs_key loc
;
2785 struct btrfs_dir_item
*di
;
2788 * Don't try to rmdir the top/root subvolume dir.
2790 if (dir
== BTRFS_FIRST_FREE_OBJECTID
)
2793 path
= alloc_path_for_send();
2798 key
.type
= BTRFS_DIR_INDEX_KEY
;
2800 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2805 struct waiting_dir_move
*dm
;
2807 if (path
->slots
[0] >= btrfs_header_nritems(path
->nodes
[0])) {
2808 ret
= btrfs_next_leaf(root
, path
);
2815 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
2817 if (found_key
.objectid
!= key
.objectid
||
2818 found_key
.type
!= key
.type
)
2821 di
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
2822 struct btrfs_dir_item
);
2823 btrfs_dir_item_key_to_cpu(path
->nodes
[0], di
, &loc
);
2825 dm
= get_waiting_dir_move(sctx
, loc
.objectid
);
2827 struct orphan_dir_info
*odi
;
2829 odi
= add_orphan_dir_info(sctx
, dir
);
2835 dm
->rmdir_ino
= dir
;
2840 if (loc
.objectid
> send_progress
) {
2851 btrfs_free_path(path
);
2855 static int is_waiting_for_move(struct send_ctx
*sctx
, u64 ino
)
2857 struct waiting_dir_move
*entry
= get_waiting_dir_move(sctx
, ino
);
2859 return entry
!= NULL
;
2862 static int add_waiting_dir_move(struct send_ctx
*sctx
, u64 ino
)
2864 struct rb_node
**p
= &sctx
->waiting_dir_moves
.rb_node
;
2865 struct rb_node
*parent
= NULL
;
2866 struct waiting_dir_move
*entry
, *dm
;
2868 dm
= kmalloc(sizeof(*dm
), GFP_NOFS
);
2876 entry
= rb_entry(parent
, struct waiting_dir_move
, node
);
2877 if (ino
< entry
->ino
) {
2879 } else if (ino
> entry
->ino
) {
2880 p
= &(*p
)->rb_right
;
2887 rb_link_node(&dm
->node
, parent
, p
);
2888 rb_insert_color(&dm
->node
, &sctx
->waiting_dir_moves
);
2892 static struct waiting_dir_move
*
2893 get_waiting_dir_move(struct send_ctx
*sctx
, u64 ino
)
2895 struct rb_node
*n
= sctx
->waiting_dir_moves
.rb_node
;
2896 struct waiting_dir_move
*entry
;
2899 entry
= rb_entry(n
, struct waiting_dir_move
, node
);
2900 if (ino
< entry
->ino
)
2902 else if (ino
> entry
->ino
)
2910 static void free_waiting_dir_move(struct send_ctx
*sctx
,
2911 struct waiting_dir_move
*dm
)
2915 rb_erase(&dm
->node
, &sctx
->waiting_dir_moves
);
2919 static int add_pending_dir_move(struct send_ctx
*sctx
, u64 parent_ino
)
2921 struct rb_node
**p
= &sctx
->pending_dir_moves
.rb_node
;
2922 struct rb_node
*parent
= NULL
;
2923 struct pending_dir_move
*entry
, *pm
;
2924 struct recorded_ref
*cur
;
2928 pm
= kmalloc(sizeof(*pm
), GFP_NOFS
);
2931 pm
->parent_ino
= parent_ino
;
2932 pm
->ino
= sctx
->cur_ino
;
2933 pm
->gen
= sctx
->cur_inode_gen
;
2934 INIT_LIST_HEAD(&pm
->list
);
2935 INIT_LIST_HEAD(&pm
->update_refs
);
2936 RB_CLEAR_NODE(&pm
->node
);
2940 entry
= rb_entry(parent
, struct pending_dir_move
, node
);
2941 if (parent_ino
< entry
->parent_ino
) {
2943 } else if (parent_ino
> entry
->parent_ino
) {
2944 p
= &(*p
)->rb_right
;
2951 list_for_each_entry(cur
, &sctx
->deleted_refs
, list
) {
2952 ret
= dup_ref(cur
, &pm
->update_refs
);
2956 list_for_each_entry(cur
, &sctx
->new_refs
, list
) {
2957 ret
= dup_ref(cur
, &pm
->update_refs
);
2962 ret
= add_waiting_dir_move(sctx
, pm
->ino
);
2967 list_add_tail(&pm
->list
, &entry
->list
);
2969 rb_link_node(&pm
->node
, parent
, p
);
2970 rb_insert_color(&pm
->node
, &sctx
->pending_dir_moves
);
2975 __free_recorded_refs(&pm
->update_refs
);
static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
						       u64 parent_ino)
{
	struct rb_node *n = sctx->pending_dir_moves.rb_node;
	struct pending_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino)
			n = n->rb_left;
		else if (parent_ino > entry->parent_ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
2999 static int apply_dir_move(struct send_ctx
*sctx
, struct pending_dir_move
*pm
)
3001 struct fs_path
*from_path
= NULL
;
3002 struct fs_path
*to_path
= NULL
;
3003 struct fs_path
*name
= NULL
;
3004 u64 orig_progress
= sctx
->send_progress
;
3005 struct recorded_ref
*cur
;
3006 u64 parent_ino
, parent_gen
;
3007 struct waiting_dir_move
*dm
= NULL
;
3011 name
= fs_path_alloc();
3012 from_path
= fs_path_alloc();
3013 if (!name
|| !from_path
) {
3018 dm
= get_waiting_dir_move(sctx
, pm
->ino
);
3020 rmdir_ino
= dm
->rmdir_ino
;
3021 free_waiting_dir_move(sctx
, dm
);
3023 ret
= get_first_ref(sctx
->parent_root
, pm
->ino
,
3024 &parent_ino
, &parent_gen
, name
);
3028 if (parent_ino
== sctx
->cur_ino
) {
3029 /* child only renamed, not moved */
3030 ASSERT(parent_gen
== sctx
->cur_inode_gen
);
3031 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
3035 ret
= fs_path_add_path(from_path
, name
);
3039 /* child moved and maybe renamed too */
3040 sctx
->send_progress
= pm
->ino
;
3041 ret
= get_cur_path(sctx
, pm
->ino
, pm
->gen
, from_path
);
3049 to_path
= fs_path_alloc();
3055 sctx
->send_progress
= sctx
->cur_ino
+ 1;
3056 ret
= get_cur_path(sctx
, pm
->ino
, pm
->gen
, to_path
);
3060 ret
= send_rename(sctx
, from_path
, to_path
);
3065 struct orphan_dir_info
*odi
;
3067 odi
= get_orphan_dir_info(sctx
, rmdir_ino
);
3069 /* already deleted */
3072 ret
= can_rmdir(sctx
, rmdir_ino
, odi
->gen
, sctx
->cur_ino
+ 1);
3078 name
= fs_path_alloc();
3083 ret
= get_cur_path(sctx
, rmdir_ino
, odi
->gen
, name
);
3086 ret
= send_rmdir(sctx
, name
);
3089 free_orphan_dir_info(sctx
, odi
);
3093 ret
= send_utimes(sctx
, pm
->ino
, pm
->gen
);
3098 * After rename/move, need to update the utimes of both new parent(s)
3099 * and old parent(s).
3101 list_for_each_entry(cur
, &pm
->update_refs
, list
) {
3102 if (cur
->dir
== rmdir_ino
)
3104 ret
= send_utimes(sctx
, cur
->dir
, cur
->dir_gen
);
3111 fs_path_free(from_path
);
3112 fs_path_free(to_path
);
3113 sctx
->send_progress
= orig_progress
;
static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
{
	if (!list_empty(&m->list))
		list_del(&m->list);
	if (!RB_EMPTY_NODE(&m->node))
		rb_erase(&m->node, &sctx->pending_dir_moves);
	__free_recorded_refs(&m->update_refs);
	kfree(m);
}
static void tail_append_pending_moves(struct pending_dir_move *moves,
				      struct list_head *stack)
{
	if (list_empty(&moves->list)) {
		list_add_tail(&moves->list, stack);
	} else {
		LIST_HEAD(list);
		list_splice_init(&moves->list, &list);
		list_add_tail(&moves->list, stack);
		list_splice_tail(&list, stack);
	}
}
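/*
 * Note: the entry owning the rb-tree node is queued ahead of the moves that
 * were chained on its ->list, and after every applied move the moves pending
 * on that inode are appended, so apply_children_dir_moves() below replays the
 * moves top-down through the directory hierarchy.
 */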
static int apply_children_dir_moves(struct send_ctx *sctx)
{
	struct pending_dir_move *pm;
	struct list_head stack;
	u64 parent_ino = sctx->cur_ino;
	int ret = 0;

	pm = get_pending_dir_moves(sctx, parent_ino);
	if (!pm)
		return 0;

	INIT_LIST_HEAD(&stack);
	tail_append_pending_moves(pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		parent_ino = pm->ino;
		ret = apply_dir_move(sctx, pm);
		free_pending_move(sctx, pm);
		if (ret)
			goto out;
		pm = get_pending_dir_moves(sctx, parent_ino);
		if (pm)
			tail_append_pending_moves(pm, &stack);
	}
	return 0;

out:
	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		free_pending_move(sctx, pm);
	}
	return ret;
}
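/*
 * wait_for_parent_move() below decides whether the rename/move of the current
 * inode must be delayed: it returns 1 when the new parent directory has a
 * higher inode number and was itself moved/renamed between the parent and
 * send roots (same generation but a different first-ref path), so our rename
 * can only be applied after that parent has been processed.
 */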
3176 static int wait_for_parent_move(struct send_ctx
*sctx
,
3177 struct recorded_ref
*parent_ref
)
3180 u64 ino
= parent_ref
->dir
;
3181 u64 parent_ino_before
, parent_ino_after
;
3182 u64 new_gen
, old_gen
;
3183 struct fs_path
*path_before
= NULL
;
3184 struct fs_path
*path_after
= NULL
;
3187 if (parent_ref
->dir
<= sctx
->cur_ino
)
3190 if (is_waiting_for_move(sctx
, ino
))
3193 ret
= get_inode_info(sctx
->parent_root
, ino
, NULL
, &old_gen
,
3194 NULL
, NULL
, NULL
, NULL
);
3200 ret
= get_inode_info(sctx
->send_root
, ino
, NULL
, &new_gen
,
3201 NULL
, NULL
, NULL
, NULL
);
3205 if (new_gen
!= old_gen
)
3208 path_before
= fs_path_alloc();
3212 ret
= get_first_ref(sctx
->parent_root
, ino
, &parent_ino_before
,
3214 if (ret
== -ENOENT
) {
3217 } else if (ret
< 0) {
3221 path_after
= fs_path_alloc();
3227 ret
= get_first_ref(sctx
->send_root
, ino
, &parent_ino_after
,
3229 if (ret
== -ENOENT
) {
3232 } else if (ret
< 0) {
3236 len1
= fs_path_len(path_before
);
3237 len2
= fs_path_len(path_after
);
3238 if (parent_ino_before
!= parent_ino_after
|| len1
!= len2
||
3239 memcmp(path_before
->start
, path_after
->start
, len1
)) {
3246 fs_path_free(path_before
);
3247 fs_path_free(path_after
);
3253 * This does all the move/link/unlink/rmdir magic.
3255 static int process_recorded_refs(struct send_ctx
*sctx
, int *pending_move
)
3258 struct recorded_ref
*cur
;
3259 struct recorded_ref
*cur2
;
3260 struct list_head check_dirs
;
3261 struct fs_path
*valid_path
= NULL
;
3264 int did_overwrite
= 0;
3266 u64 last_dir_ino_rm
= 0;
3268 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx
->cur_ino
);
3271 * This should never happen as the root dir always has the same ref
3272 * which is always '..'
3274 BUG_ON(sctx
->cur_ino
<= BTRFS_FIRST_FREE_OBJECTID
);
3275 INIT_LIST_HEAD(&check_dirs
);
3277 valid_path
= fs_path_alloc();
3284 * First, check if the first ref of the current inode was overwritten
3285 * before. If yes, we know that the current inode was already orphanized
3286 * and thus use the orphan name. If not, we can use get_cur_path to
3287 * get the path of the first ref as it would like while receiving at
3288 * this point in time.
3289 * New inodes are always orphan at the beginning, so force to use the
3290 * orphan name in this case.
3291 * The first ref is stored in valid_path and will be updated if it
3292 * gets moved around.
3294 if (!sctx
->cur_inode_new
) {
3295 ret
= did_overwrite_first_ref(sctx
, sctx
->cur_ino
,
3296 sctx
->cur_inode_gen
);
3302 if (sctx
->cur_inode_new
|| did_overwrite
) {
3303 ret
= gen_unique_name(sctx
, sctx
->cur_ino
,
3304 sctx
->cur_inode_gen
, valid_path
);
3309 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
3315 list_for_each_entry(cur
, &sctx
->new_refs
, list
) {
3317 * We may have refs where the parent directory does not exist
3318 * yet. This happens if the parent directories inum is higher
3319 * the the current inum. To handle this case, we create the
3320 * parent directory out of order. But we need to check if this
3321 * did already happen before due to other refs in the same dir.
3323 ret
= get_cur_inode_state(sctx
, cur
->dir
, cur
->dir_gen
);
3326 if (ret
== inode_state_will_create
) {
3329 * First check if any of the current inodes refs did
3330 * already create the dir.
3332 list_for_each_entry(cur2
, &sctx
->new_refs
, list
) {
3335 if (cur2
->dir
== cur
->dir
) {
3342 * If that did not happen, check if a previous inode
3343 * did already create the dir.
3346 ret
= did_create_dir(sctx
, cur
->dir
);
3350 ret
= send_create_inode(sctx
, cur
->dir
);
3357 * Check if this new ref would overwrite the first ref of
3358 * another unprocessed inode. If yes, orphanize the
3359 * overwritten inode. If we find an overwritten ref that is
3360 * not the first ref, simply unlink it.
3362 ret
= will_overwrite_ref(sctx
, cur
->dir
, cur
->dir_gen
,
3363 cur
->name
, cur
->name_len
,
3364 &ow_inode
, &ow_gen
);
3368 ret
= is_first_ref(sctx
->parent_root
,
3369 ow_inode
, cur
->dir
, cur
->name
,
3374 ret
= orphanize_inode(sctx
, ow_inode
, ow_gen
,
3379 ret
= send_unlink(sctx
, cur
->full_path
);
3386 * link/move the ref to the new place. If we have an orphan
3387 * inode, move it and update valid_path. If not, link or move
3388 * it depending on the inode mode.
3391 ret
= send_rename(sctx
, valid_path
, cur
->full_path
);
3395 ret
= fs_path_copy(valid_path
, cur
->full_path
);
3399 if (S_ISDIR(sctx
->cur_inode_mode
)) {
3401 * Dirs can't be linked, so move it. For moved
3402 * dirs, we always have one new and one deleted
3403 * ref. The deleted ref is ignored later.
3405 ret
= wait_for_parent_move(sctx
, cur
);
3409 ret
= add_pending_dir_move(sctx
,
3413 ret
= send_rename(sctx
, valid_path
,
3416 ret
= fs_path_copy(valid_path
,
3422 ret
= send_link(sctx
, cur
->full_path
,
3428 ret
= dup_ref(cur
, &check_dirs
);
3433 if (S_ISDIR(sctx
->cur_inode_mode
) && sctx
->cur_inode_deleted
) {
3435 * Check if we can already rmdir the directory. If not,
3436 * orphanize it. For every dir item inside that gets deleted
3437 * later, we do this check again and rmdir it then if possible.
3438 * See the use of check_dirs for more details.
3440 ret
= can_rmdir(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
3445 ret
= send_rmdir(sctx
, valid_path
);
3448 } else if (!is_orphan
) {
3449 ret
= orphanize_inode(sctx
, sctx
->cur_ino
,
3450 sctx
->cur_inode_gen
, valid_path
);
3456 list_for_each_entry(cur
, &sctx
->deleted_refs
, list
) {
3457 ret
= dup_ref(cur
, &check_dirs
);
3461 } else if (S_ISDIR(sctx
->cur_inode_mode
) &&
3462 !list_empty(&sctx
->deleted_refs
)) {
3464 * We have a moved dir. Add the old parent to check_dirs
3466 cur
= list_entry(sctx
->deleted_refs
.next
, struct recorded_ref
,
3468 ret
= dup_ref(cur
, &check_dirs
);
3471 } else if (!S_ISDIR(sctx
->cur_inode_mode
)) {
3473 * We have a non dir inode. Go through all deleted refs and
3474 * unlink them if they were not already overwritten by other
3477 list_for_each_entry(cur
, &sctx
->deleted_refs
, list
) {
3478 ret
= did_overwrite_ref(sctx
, cur
->dir
, cur
->dir_gen
,
3479 sctx
->cur_ino
, sctx
->cur_inode_gen
,
3480 cur
->name
, cur
->name_len
);
3484 ret
= send_unlink(sctx
, cur
->full_path
);
3488 ret
= dup_ref(cur
, &check_dirs
);
3493 * If the inode is still orphan, unlink the orphan. This may
3494 * happen when a previous inode did overwrite the first ref
3495 * of this inode and no new refs were added for the current
3496 * inode. Unlinking does not mean that the inode is deleted in
3497 * all cases. There may still be links to this inode in other
3501 ret
= send_unlink(sctx
, valid_path
);
3508 * We did collect all parent dirs where cur_inode was once located. We
3509 * now go through all these dirs and check if they are pending for
3510 * deletion and if it's finally possible to perform the rmdir now.
3511 * We also update the inode stats of the parent dirs here.
3513 list_for_each_entry(cur
, &check_dirs
, list
) {
3515 * In case we had refs into dirs that were not processed yet,
3516 * we don't need to do the utime and rmdir logic for these dirs.
3517 * The dir will be processed later.
3519 if (cur
->dir
> sctx
->cur_ino
)
3522 ret
= get_cur_inode_state(sctx
, cur
->dir
, cur
->dir_gen
);
3526 if (ret
== inode_state_did_create
||
3527 ret
== inode_state_no_change
) {
3528 /* TODO delayed utimes */
3529 ret
= send_utimes(sctx
, cur
->dir
, cur
->dir_gen
);
3532 } else if (ret
== inode_state_did_delete
&&
3533 cur
->dir
!= last_dir_ino_rm
) {
3534 ret
= can_rmdir(sctx
, cur
->dir
, cur
->dir_gen
,
3539 ret
= get_cur_path(sctx
, cur
->dir
,
3540 cur
->dir_gen
, valid_path
);
3543 ret
= send_rmdir(sctx
, valid_path
);
3546 last_dir_ino_rm
= cur
->dir
;
3554 __free_recorded_refs(&check_dirs
);
3555 free_recorded_refs(sctx
);
3556 fs_path_free(valid_path
);
3560 static int record_ref(struct btrfs_root
*root
, int num
, u64 dir
, int index
,
3561 struct fs_path
*name
, void *ctx
, struct list_head
*refs
)
3564 struct send_ctx
*sctx
= ctx
;
3568 p
= fs_path_alloc();
3572 ret
= get_inode_info(root
, dir
, NULL
, &gen
, NULL
, NULL
,
3577 ret
= get_cur_path(sctx
, dir
, gen
, p
);
3580 ret
= fs_path_add_path(p
, name
);
3584 ret
= __record_ref(refs
, dir
, gen
, p
);
3592 static int __record_new_ref(int num
, u64 dir
, int index
,
3593 struct fs_path
*name
,
3596 struct send_ctx
*sctx
= ctx
;
3597 return record_ref(sctx
->send_root
, num
, dir
, index
, name
,
3598 ctx
, &sctx
->new_refs
);
3602 static int __record_deleted_ref(int num
, u64 dir
, int index
,
3603 struct fs_path
*name
,
3606 struct send_ctx
*sctx
= ctx
;
3607 return record_ref(sctx
->parent_root
, num
, dir
, index
, name
,
3608 ctx
, &sctx
->deleted_refs
);
static int record_new_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);
	if (ret < 0)
		return ret;
	return 0;
}

static int record_deleted_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
	if (ret < 0)
		return ret;
	return 0;
}
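/*
 * The two helpers above only collect refs: new refs from the send root go to
 * sctx->new_refs and deleted refs from the parent root go to
 * sctx->deleted_refs; both lists are consumed later by process_recorded_refs().
 */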
3639 struct find_ref_ctx
{
3642 struct btrfs_root
*root
;
3643 struct fs_path
*name
;
3647 static int __find_iref(int num
, u64 dir
, int index
,
3648 struct fs_path
*name
,
3651 struct find_ref_ctx
*ctx
= ctx_
;
3655 if (dir
== ctx
->dir
&& fs_path_len(name
) == fs_path_len(ctx
->name
) &&
3656 strncmp(name
->start
, ctx
->name
->start
, fs_path_len(name
)) == 0) {
3658 * To avoid doing extra lookups we'll only do this if everything
3661 ret
= get_inode_info(ctx
->root
, dir
, NULL
, &dir_gen
, NULL
,
3665 if (dir_gen
!= ctx
->dir_gen
)
3667 ctx
->found_idx
= num
;
3673 static int find_iref(struct btrfs_root
*root
,
3674 struct btrfs_path
*path
,
3675 struct btrfs_key
*key
,
3676 u64 dir
, u64 dir_gen
, struct fs_path
*name
)
3679 struct find_ref_ctx ctx
;
3683 ctx
.dir_gen
= dir_gen
;
3687 ret
= iterate_inode_ref(root
, path
, key
, 0, __find_iref
, &ctx
);
3691 if (ctx
.found_idx
== -1)
3694 return ctx
.found_idx
;
3697 static int __record_changed_new_ref(int num
, u64 dir
, int index
,
3698 struct fs_path
*name
,
3703 struct send_ctx
*sctx
= ctx
;
3705 ret
= get_inode_info(sctx
->send_root
, dir
, NULL
, &dir_gen
, NULL
,
3710 ret
= find_iref(sctx
->parent_root
, sctx
->right_path
,
3711 sctx
->cmp_key
, dir
, dir_gen
, name
);
3713 ret
= __record_new_ref(num
, dir
, index
, name
, sctx
);
3720 static int __record_changed_deleted_ref(int num
, u64 dir
, int index
,
3721 struct fs_path
*name
,
3726 struct send_ctx
*sctx
= ctx
;
3728 ret
= get_inode_info(sctx
->parent_root
, dir
, NULL
, &dir_gen
, NULL
,
3733 ret
= find_iref(sctx
->send_root
, sctx
->left_path
, sctx
->cmp_key
,
3734 dir
, dir_gen
, name
);
3736 ret
= __record_deleted_ref(num
, dir
, index
, name
, sctx
);
3743 static int record_changed_ref(struct send_ctx
*sctx
)
3747 ret
= iterate_inode_ref(sctx
->send_root
, sctx
->left_path
,
3748 sctx
->cmp_key
, 0, __record_changed_new_ref
, sctx
);
3751 ret
= iterate_inode_ref(sctx
->parent_root
, sctx
->right_path
,
3752 sctx
->cmp_key
, 0, __record_changed_deleted_ref
, sctx
);
3762 * Record and process all refs at once. Needed when an inode changes the
3763 * generation number, which means that it was deleted and recreated.
3765 static int process_all_refs(struct send_ctx
*sctx
,
3766 enum btrfs_compare_tree_result cmd
)
3769 struct btrfs_root
*root
;
3770 struct btrfs_path
*path
;
3771 struct btrfs_key key
;
3772 struct btrfs_key found_key
;
3773 struct extent_buffer
*eb
;
3775 iterate_inode_ref_t cb
;
3776 int pending_move
= 0;
3778 path
= alloc_path_for_send();
3782 if (cmd
== BTRFS_COMPARE_TREE_NEW
) {
3783 root
= sctx
->send_root
;
3784 cb
= __record_new_ref
;
3785 } else if (cmd
== BTRFS_COMPARE_TREE_DELETED
) {
3786 root
= sctx
->parent_root
;
3787 cb
= __record_deleted_ref
;
3789 btrfs_err(sctx
->send_root
->fs_info
,
3790 "Wrong command %d in process_all_refs", cmd
);
3795 key
.objectid
= sctx
->cmp_key
->objectid
;
3796 key
.type
= BTRFS_INODE_REF_KEY
;
3798 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3803 eb
= path
->nodes
[0];
3804 slot
= path
->slots
[0];
3805 if (slot
>= btrfs_header_nritems(eb
)) {
3806 ret
= btrfs_next_leaf(root
, path
);
3814 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
3816 if (found_key
.objectid
!= key
.objectid
||
3817 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
3818 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
))
3821 ret
= iterate_inode_ref(root
, path
, &found_key
, 0, cb
, sctx
);
3827 btrfs_release_path(path
);
3829 ret
= process_recorded_refs(sctx
, &pending_move
);
3830 /* Only applicable to an incremental send. */
3831 ASSERT(pending_move
== 0);
3834 btrfs_free_path(path
);
3838 static int send_set_xattr(struct send_ctx
*sctx
,
3839 struct fs_path
*path
,
3840 const char *name
, int name_len
,
3841 const char *data
, int data_len
)
3845 ret
= begin_cmd(sctx
, BTRFS_SEND_C_SET_XATTR
);
3849 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
3850 TLV_PUT_STRING(sctx
, BTRFS_SEND_A_XATTR_NAME
, name
, name_len
);
3851 TLV_PUT(sctx
, BTRFS_SEND_A_XATTR_DATA
, data
, data_len
);
3853 ret
= send_cmd(sctx
);
3860 static int send_remove_xattr(struct send_ctx
*sctx
,
3861 struct fs_path
*path
,
3862 const char *name
, int name_len
)
3866 ret
= begin_cmd(sctx
, BTRFS_SEND_C_REMOVE_XATTR
);
3870 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
3871 TLV_PUT_STRING(sctx
, BTRFS_SEND_A_XATTR_NAME
, name
, name_len
);
3873 ret
= send_cmd(sctx
);
3880 static int __process_new_xattr(int num
, struct btrfs_key
*di_key
,
3881 const char *name
, int name_len
,
3882 const char *data
, int data_len
,
3886 struct send_ctx
*sctx
= ctx
;
3888 posix_acl_xattr_header dummy_acl
;
3890 p
= fs_path_alloc();
3895 * This hack is needed because empty acl's are stored as zero byte
3896 * data in xattrs. Problem with that is, that receiving these zero byte
3897 * acl's will fail later. To fix this, we send a dummy acl list that
3898 * only contains the version number and no entries.
3900 if (!strncmp(name
, XATTR_NAME_POSIX_ACL_ACCESS
, name_len
) ||
3901 !strncmp(name
, XATTR_NAME_POSIX_ACL_DEFAULT
, name_len
)) {
3902 if (data_len
== 0) {
3903 dummy_acl
.a_version
=
3904 cpu_to_le32(POSIX_ACL_XATTR_VERSION
);
3905 data
= (char *)&dummy_acl
;
3906 data_len
= sizeof(dummy_acl
);
3910 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
, p
);
3914 ret
= send_set_xattr(sctx
, p
, name
, name_len
, data
, data_len
);
3921 static int __process_deleted_xattr(int num
, struct btrfs_key
*di_key
,
3922 const char *name
, int name_len
,
3923 const char *data
, int data_len
,
3927 struct send_ctx
*sctx
= ctx
;
3930 p
= fs_path_alloc();
3934 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
, p
);
3938 ret
= send_remove_xattr(sctx
, p
, name
, name_len
);
3945 static int process_new_xattr(struct send_ctx
*sctx
)
3949 ret
= iterate_dir_item(sctx
->send_root
, sctx
->left_path
,
3950 sctx
->cmp_key
, __process_new_xattr
, sctx
);
3955 static int process_deleted_xattr(struct send_ctx
*sctx
)
3959 ret
= iterate_dir_item(sctx
->parent_root
, sctx
->right_path
,
3960 sctx
->cmp_key
, __process_deleted_xattr
, sctx
);
3965 struct find_xattr_ctx
{
3973 static int __find_xattr(int num
, struct btrfs_key
*di_key
,
3974 const char *name
, int name_len
,
3975 const char *data
, int data_len
,
3976 u8 type
, void *vctx
)
3978 struct find_xattr_ctx
*ctx
= vctx
;
3980 if (name_len
== ctx
->name_len
&&
3981 strncmp(name
, ctx
->name
, name_len
) == 0) {
3982 ctx
->found_idx
= num
;
3983 ctx
->found_data_len
= data_len
;
3984 ctx
->found_data
= kmemdup(data
, data_len
, GFP_NOFS
);
3985 if (!ctx
->found_data
)
3992 static int find_xattr(struct btrfs_root
*root
,
3993 struct btrfs_path
*path
,
3994 struct btrfs_key
*key
,
3995 const char *name
, int name_len
,
3996 char **data
, int *data_len
)
3999 struct find_xattr_ctx ctx
;
4002 ctx
.name_len
= name_len
;
4004 ctx
.found_data
= NULL
;
4005 ctx
.found_data_len
= 0;
4007 ret
= iterate_dir_item(root
, path
, key
, __find_xattr
, &ctx
);
4011 if (ctx
.found_idx
== -1)
4014 *data
= ctx
.found_data
;
4015 *data_len
= ctx
.found_data_len
;
4017 kfree(ctx
.found_data
);
4019 return ctx
.found_idx
;
4023 static int __process_changed_new_xattr(int num
, struct btrfs_key
*di_key
,
4024 const char *name
, int name_len
,
4025 const char *data
, int data_len
,
4029 struct send_ctx
*sctx
= ctx
;
4030 char *found_data
= NULL
;
4031 int found_data_len
= 0;
4033 ret
= find_xattr(sctx
->parent_root
, sctx
->right_path
,
4034 sctx
->cmp_key
, name
, name_len
, &found_data
,
4036 if (ret
== -ENOENT
) {
4037 ret
= __process_new_xattr(num
, di_key
, name
, name_len
, data
,
4038 data_len
, type
, ctx
);
4039 } else if (ret
>= 0) {
4040 if (data_len
!= found_data_len
||
4041 memcmp(data
, found_data
, data_len
)) {
4042 ret
= __process_new_xattr(num
, di_key
, name
, name_len
,
4043 data
, data_len
, type
, ctx
);
4053 static int __process_changed_deleted_xattr(int num
, struct btrfs_key
*di_key
,
4054 const char *name
, int name_len
,
4055 const char *data
, int data_len
,
4059 struct send_ctx
*sctx
= ctx
;
4061 ret
= find_xattr(sctx
->send_root
, sctx
->left_path
, sctx
->cmp_key
,
4062 name
, name_len
, NULL
, NULL
);
4064 ret
= __process_deleted_xattr(num
, di_key
, name
, name_len
, data
,
4065 data_len
, type
, ctx
);
4072 static int process_changed_xattr(struct send_ctx
*sctx
)
4076 ret
= iterate_dir_item(sctx
->send_root
, sctx
->left_path
,
4077 sctx
->cmp_key
, __process_changed_new_xattr
, sctx
);
4080 ret
= iterate_dir_item(sctx
->parent_root
, sctx
->right_path
,
4081 sctx
->cmp_key
, __process_changed_deleted_xattr
, sctx
);
4087 static int process_all_new_xattrs(struct send_ctx
*sctx
)
4090 struct btrfs_root
*root
;
4091 struct btrfs_path
*path
;
4092 struct btrfs_key key
;
4093 struct btrfs_key found_key
;
4094 struct extent_buffer
*eb
;
4097 path
= alloc_path_for_send();
4101 root
= sctx
->send_root
;
4103 key
.objectid
= sctx
->cmp_key
->objectid
;
4104 key
.type
= BTRFS_XATTR_ITEM_KEY
;
4106 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
4111 eb
= path
->nodes
[0];
4112 slot
= path
->slots
[0];
4113 if (slot
>= btrfs_header_nritems(eb
)) {
4114 ret
= btrfs_next_leaf(root
, path
);
4117 } else if (ret
> 0) {
4124 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
4125 if (found_key
.objectid
!= key
.objectid
||
4126 found_key
.type
!= key
.type
) {
4131 ret
= iterate_dir_item(root
, path
, &found_key
,
4132 __process_new_xattr
, sctx
);
4140 btrfs_free_path(path
);
4144 static ssize_t
fill_read_buf(struct send_ctx
*sctx
, u64 offset
, u32 len
)
4146 struct btrfs_root
*root
= sctx
->send_root
;
4147 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
4148 struct inode
*inode
;
4151 struct btrfs_key key
;
4152 pgoff_t index
= offset
>> PAGE_CACHE_SHIFT
;
4154 unsigned pg_offset
= offset
& ~PAGE_CACHE_MASK
;
4157 key
.objectid
= sctx
->cur_ino
;
4158 key
.type
= BTRFS_INODE_ITEM_KEY
;
4161 inode
= btrfs_iget(fs_info
->sb
, &key
, root
, NULL
);
4163 return PTR_ERR(inode
);
4165 if (offset
+ len
> i_size_read(inode
)) {
4166 if (offset
> i_size_read(inode
))
4169 len
= offset
- i_size_read(inode
);
4174 last_index
= (offset
+ len
- 1) >> PAGE_CACHE_SHIFT
;
4176 /* initial readahead */
4177 memset(&sctx
->ra
, 0, sizeof(struct file_ra_state
));
4178 file_ra_state_init(&sctx
->ra
, inode
->i_mapping
);
4179 btrfs_force_ra(inode
->i_mapping
, &sctx
->ra
, NULL
, index
,
4180 last_index
- index
+ 1);
4182 while (index
<= last_index
) {
4183 unsigned cur_len
= min_t(unsigned, len
,
4184 PAGE_CACHE_SIZE
- pg_offset
);
4185 page
= find_or_create_page(inode
->i_mapping
, index
, GFP_NOFS
);
4191 if (!PageUptodate(page
)) {
4192 btrfs_readpage(NULL
, page
);
4194 if (!PageUptodate(page
)) {
4196 page_cache_release(page
);
4203 memcpy(sctx
->read_buf
+ ret
, addr
+ pg_offset
, cur_len
);
4206 page_cache_release(page
);
4218 * Read some bytes from the current inode/file and send a write command to
4221 static int send_write(struct send_ctx
*sctx
, u64 offset
, u32 len
)
4225 ssize_t num_read
= 0;
4227 p
= fs_path_alloc();
4231 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset
, len
);
4233 num_read
= fill_read_buf(sctx
, offset
, len
);
4234 if (num_read
<= 0) {
4240 ret
= begin_cmd(sctx
, BTRFS_SEND_C_WRITE
);
4244 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
, p
);
4248 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
4249 TLV_PUT_U64(sctx
, BTRFS_SEND_A_FILE_OFFSET
, offset
);
4250 TLV_PUT(sctx
, BTRFS_SEND_A_DATA
, sctx
->read_buf
, num_read
);
4252 ret
= send_cmd(sctx
);
4263 * Send a clone command to user space.
4265 static int send_clone(struct send_ctx
*sctx
,
4266 u64 offset
, u32 len
,
4267 struct clone_root
*clone_root
)
4273 verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
4274 "clone_inode=%llu, clone_offset=%llu\n", offset
, len
,
4275 clone_root
->root
->objectid
, clone_root
->ino
,
4276 clone_root
->offset
);
4278 p
= fs_path_alloc();
4282 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CLONE
);
4286 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
, p
);
4290 TLV_PUT_U64(sctx
, BTRFS_SEND_A_FILE_OFFSET
, offset
);
4291 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_LEN
, len
);
4292 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
4294 if (clone_root
->root
== sctx
->send_root
) {
4295 ret
= get_inode_info(sctx
->send_root
, clone_root
->ino
, NULL
,
4296 &gen
, NULL
, NULL
, NULL
, NULL
);
4299 ret
= get_cur_path(sctx
, clone_root
->ino
, gen
, p
);
4301 ret
= get_inode_path(clone_root
->root
, clone_root
->ino
, p
);
4306 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_CLONE_UUID
,
4307 clone_root
->root
->root_item
.uuid
);
4308 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_CTRANSID
,
4309 le64_to_cpu(clone_root
->root
->root_item
.ctransid
));
4310 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_CLONE_PATH
, p
);
4311 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_OFFSET
,
4312 clone_root
->offset
);
4314 ret
= send_cmd(sctx
);
4323 * Send an update extent command to user space.
4325 static int send_update_extent(struct send_ctx
*sctx
,
4326 u64 offset
, u32 len
)
4331 p
= fs_path_alloc();
4335 ret
= begin_cmd(sctx
, BTRFS_SEND_C_UPDATE_EXTENT
);
4339 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
, p
);
4343 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
4344 TLV_PUT_U64(sctx
, BTRFS_SEND_A_FILE_OFFSET
, offset
);
4345 TLV_PUT_U64(sctx
, BTRFS_SEND_A_SIZE
, len
);
4347 ret
= send_cmd(sctx
);
4355 static int send_hole(struct send_ctx
*sctx
, u64 end
)
4357 struct fs_path
*p
= NULL
;
4358 u64 offset
= sctx
->cur_inode_last_extent
;
4362 p
= fs_path_alloc();
4365 memset(sctx
->read_buf
, 0, BTRFS_SEND_READ_SIZE
);
4366 while (offset
< end
) {
4367 len
= min_t(u64
, end
- offset
, BTRFS_SEND_READ_SIZE
);
4369 ret
= begin_cmd(sctx
, BTRFS_SEND_C_WRITE
);
4372 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
, p
);
4375 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
4376 TLV_PUT_U64(sctx
, BTRFS_SEND_A_FILE_OFFSET
, offset
);
4377 TLV_PUT(sctx
, BTRFS_SEND_A_DATA
, sctx
->read_buf
, len
);
4378 ret
= send_cmd(sctx
);
4388 static int send_write_or_clone(struct send_ctx
*sctx
,
4389 struct btrfs_path
*path
,
4390 struct btrfs_key
*key
,
4391 struct clone_root
*clone_root
)
4394 struct btrfs_file_extent_item
*ei
;
4395 u64 offset
= key
->offset
;
4400 u64 bs
= sctx
->send_root
->fs_info
->sb
->s_blocksize
;
4402 ei
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
4403 struct btrfs_file_extent_item
);
4404 type
= btrfs_file_extent_type(path
->nodes
[0], ei
);
4405 if (type
== BTRFS_FILE_EXTENT_INLINE
) {
4406 len
= btrfs_file_extent_inline_len(path
->nodes
[0],
4407 path
->slots
[0], ei
);
4409 * it is possible the inline item won't cover the whole page,
4410 * but there may be items after this page. Make
4411 * sure to send the whole thing
4413 len
= PAGE_CACHE_ALIGN(len
);
4415 len
= btrfs_file_extent_num_bytes(path
->nodes
[0], ei
);
4418 if (offset
+ len
> sctx
->cur_inode_size
)
4419 len
= sctx
->cur_inode_size
- offset
;
4425 if (clone_root
&& IS_ALIGNED(offset
+ len
, bs
)) {
4426 ret
= send_clone(sctx
, offset
, len
, clone_root
);
4427 } else if (sctx
->flags
& BTRFS_SEND_FLAG_NO_FILE_DATA
) {
4428 ret
= send_update_extent(sctx
, offset
, len
);
4432 if (l
> BTRFS_SEND_READ_SIZE
)
4433 l
= BTRFS_SEND_READ_SIZE
;
4434 ret
= send_write(sctx
, pos
+ offset
, l
);
4447 static int is_extent_unchanged(struct send_ctx
*sctx
,
4448 struct btrfs_path
*left_path
,
4449 struct btrfs_key
*ekey
)
4452 struct btrfs_key key
;
4453 struct btrfs_path
*path
= NULL
;
4454 struct extent_buffer
*eb
;
4456 struct btrfs_key found_key
;
4457 struct btrfs_file_extent_item
*ei
;
4462 u64 left_offset_fixed
;
4470 path
= alloc_path_for_send();
4474 eb
= left_path
->nodes
[0];
4475 slot
= left_path
->slots
[0];
4476 ei
= btrfs_item_ptr(eb
, slot
, struct btrfs_file_extent_item
);
4477 left_type
= btrfs_file_extent_type(eb
, ei
);
4479 if (left_type
!= BTRFS_FILE_EXTENT_REG
) {
4483 left_disknr
= btrfs_file_extent_disk_bytenr(eb
, ei
);
4484 left_len
= btrfs_file_extent_num_bytes(eb
, ei
);
4485 left_offset
= btrfs_file_extent_offset(eb
, ei
);
4486 left_gen
= btrfs_file_extent_generation(eb
, ei
);
4489 * Following comments will refer to these graphics. L is the left
4490 * extents which we are checking at the moment. 1-8 are the right
4491 * extents that we iterate.
4494 * |-1-|-2a-|-3-|-4-|-5-|-6-|
4497 * |--1--|-2b-|...(same as above)
4499 * Alternative situation. Happens on files where extents got split.
4501 * |-----------7-----------|-6-|
4503 * Alternative situation. Happens on files which got larger.
4506 * Nothing follows after 8.
4509 key
.objectid
= ekey
->objectid
;
4510 key
.type
= BTRFS_EXTENT_DATA_KEY
;
4511 key
.offset
= ekey
->offset
;
4512 ret
= btrfs_search_slot_for_read(sctx
->parent_root
, &key
, path
, 0, 0);
4521 * Handle special case where the right side has no extents at all.
4523 eb
= path
->nodes
[0];
4524 slot
= path
->slots
[0];
4525 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
4526 if (found_key
.objectid
!= key
.objectid
||
4527 found_key
.type
!= key
.type
) {
4528 /* If we're a hole then just pretend nothing changed */
4529 ret
= (left_disknr
) ? 0 : 1;
4534 * We're now on 2a, 2b or 7.
4537 while (key
.offset
< ekey
->offset
+ left_len
) {
4538 ei
= btrfs_item_ptr(eb
, slot
, struct btrfs_file_extent_item
);
4539 right_type
= btrfs_file_extent_type(eb
, ei
);
4540 if (right_type
!= BTRFS_FILE_EXTENT_REG
) {
4545 right_disknr
= btrfs_file_extent_disk_bytenr(eb
, ei
);
4546 right_len
= btrfs_file_extent_num_bytes(eb
, ei
);
4547 right_offset
= btrfs_file_extent_offset(eb
, ei
);
4548 right_gen
= btrfs_file_extent_generation(eb
, ei
);
4551 * Are we at extent 8? If yes, we know the extent is changed.
4552 * This may only happen on the first iteration.
4554 if (found_key
.offset
+ right_len
<= ekey
->offset
) {
4555 /* If we're a hole just pretend nothing changed */
4556 ret
= (left_disknr
) ? 0 : 1;
4560 left_offset_fixed
= left_offset
;
4561 if (key
.offset
< ekey
->offset
) {
4562 /* Fix the right offset for 2a and 7. */
4563 right_offset
+= ekey
->offset
- key
.offset
;
4565 /* Fix the left offset for all behind 2a and 2b */
4566 left_offset_fixed
+= key
.offset
- ekey
->offset
;
4570 * Check if we have the same extent.
4572 if (left_disknr
!= right_disknr
||
4573 left_offset_fixed
!= right_offset
||
4574 left_gen
!= right_gen
) {
4580 * Go to the next extent.
4582 ret
= btrfs_next_item(sctx
->parent_root
, path
);
4586 eb
= path
->nodes
[0];
4587 slot
= path
->slots
[0];
4588 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
4590 if (ret
|| found_key
.objectid
!= key
.objectid
||
4591 found_key
.type
!= key
.type
) {
4592 key
.offset
+= right_len
;
4595 if (found_key
.offset
!= key
.offset
+ right_len
) {
4603 * We're now behind the left extent (treat as unchanged) or at the end
4604 * of the right side (treat as changed).
4606 if (key
.offset
>= ekey
->offset
+ left_len
)
4613 btrfs_free_path(path
);
4617 static int get_last_extent(struct send_ctx
*sctx
, u64 offset
)
4619 struct btrfs_path
*path
;
4620 struct btrfs_root
*root
= sctx
->send_root
;
4621 struct btrfs_file_extent_item
*fi
;
4622 struct btrfs_key key
;
4627 path
= alloc_path_for_send();
4631 sctx
->cur_inode_last_extent
= 0;
4633 key
.objectid
= sctx
->cur_ino
;
4634 key
.type
= BTRFS_EXTENT_DATA_KEY
;
4635 key
.offset
= offset
;
4636 ret
= btrfs_search_slot_for_read(root
, &key
, path
, 0, 1);
4640 btrfs_item_key_to_cpu(path
->nodes
[0], &key
, path
->slots
[0]);
4641 if (key
.objectid
!= sctx
->cur_ino
|| key
.type
!= BTRFS_EXTENT_DATA_KEY
)
4644 fi
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
4645 struct btrfs_file_extent_item
);
4646 type
= btrfs_file_extent_type(path
->nodes
[0], fi
);
4647 if (type
== BTRFS_FILE_EXTENT_INLINE
) {
4648 u64 size
= btrfs_file_extent_inline_len(path
->nodes
[0],
4649 path
->slots
[0], fi
);
4650 extent_end
= ALIGN(key
.offset
+ size
,
4651 sctx
->send_root
->sectorsize
);
4653 extent_end
= key
.offset
+
4654 btrfs_file_extent_num_bytes(path
->nodes
[0], fi
);
4656 sctx
->cur_inode_last_extent
= extent_end
;
4658 btrfs_free_path(path
);
4662 static int maybe_send_hole(struct send_ctx
*sctx
, struct btrfs_path
*path
,
4663 struct btrfs_key
*key
)
4665 struct btrfs_file_extent_item
*fi
;
4670 if (sctx
->cur_ino
!= key
->objectid
|| !need_send_hole(sctx
))
4673 if (sctx
->cur_inode_last_extent
== (u64
)-1) {
4674 ret
= get_last_extent(sctx
, key
->offset
- 1);
4679 fi
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
4680 struct btrfs_file_extent_item
);
4681 type
= btrfs_file_extent_type(path
->nodes
[0], fi
);
4682 if (type
== BTRFS_FILE_EXTENT_INLINE
) {
4683 u64 size
= btrfs_file_extent_inline_len(path
->nodes
[0],
4684 path
->slots
[0], fi
);
4685 extent_end
= ALIGN(key
->offset
+ size
,
4686 sctx
->send_root
->sectorsize
);
4688 extent_end
= key
->offset
+
4689 btrfs_file_extent_num_bytes(path
->nodes
[0], fi
);
4692 if (path
->slots
[0] == 0 &&
4693 sctx
->cur_inode_last_extent
< key
->offset
) {
4695 * We might have skipped entire leafs that contained only
4696 * file extent items for our current inode. These leafs have
4697 * a generation number smaller (older) than the one in the
4698 * current leaf and the leaf our last extent came from, and
4699 * are located between these 2 leafs.
4701 ret
= get_last_extent(sctx
, key
->offset
- 1);
4706 if (sctx
->cur_inode_last_extent
< key
->offset
)
4707 ret
= send_hole(sctx
, key
->offset
);
4708 sctx
->cur_inode_last_extent
= extent_end
;
4712 static int process_extent(struct send_ctx
*sctx
,
4713 struct btrfs_path
*path
,
4714 struct btrfs_key
*key
)
4716 struct clone_root
*found_clone
= NULL
;
4719 if (S_ISLNK(sctx
->cur_inode_mode
))
4722 if (sctx
->parent_root
&& !sctx
->cur_inode_new
) {
4723 ret
= is_extent_unchanged(sctx
, path
, key
);
4731 struct btrfs_file_extent_item
*ei
;
4734 ei
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
4735 struct btrfs_file_extent_item
);
4736 type
= btrfs_file_extent_type(path
->nodes
[0], ei
);
4737 if (type
== BTRFS_FILE_EXTENT_PREALLOC
||
4738 type
== BTRFS_FILE_EXTENT_REG
) {
4740 * The send spec does not have a prealloc command yet,
4741 * so just leave a hole for prealloc'ed extents until
4742 * we have enough commands queued up to justify rev'ing
4745 if (type
== BTRFS_FILE_EXTENT_PREALLOC
) {
4750 /* Have a hole, just skip it. */
4751 if (btrfs_file_extent_disk_bytenr(path
->nodes
[0], ei
) == 0) {
4758 ret
= find_extent_clone(sctx
, path
, key
->objectid
, key
->offset
,
4759 sctx
->cur_inode_size
, &found_clone
);
4760 if (ret
!= -ENOENT
&& ret
< 0)
4763 ret
= send_write_or_clone(sctx
, path
, key
, found_clone
);
4767 ret
= maybe_send_hole(sctx
, path
, key
);
4772 static int process_all_extents(struct send_ctx
*sctx
)
4775 struct btrfs_root
*root
;
4776 struct btrfs_path
*path
;
4777 struct btrfs_key key
;
4778 struct btrfs_key found_key
;
4779 struct extent_buffer
*eb
;
4782 root
= sctx
->send_root
;
4783 path
= alloc_path_for_send();
4787 key
.objectid
= sctx
->cmp_key
->objectid
;
4788 key
.type
= BTRFS_EXTENT_DATA_KEY
;
4790 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
4795 eb
= path
->nodes
[0];
4796 slot
= path
->slots
[0];
4798 if (slot
>= btrfs_header_nritems(eb
)) {
4799 ret
= btrfs_next_leaf(root
, path
);
4802 } else if (ret
> 0) {
4809 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
4811 if (found_key
.objectid
!= key
.objectid
||
4812 found_key
.type
!= key
.type
) {
4817 ret
= process_extent(sctx
, path
, &found_key
);
4825 btrfs_free_path(path
);
4829 static int process_recorded_refs_if_needed(struct send_ctx
*sctx
, int at_end
,
4831 int *refs_processed
)
4835 if (sctx
->cur_ino
== 0)
4837 if (!at_end
&& sctx
->cur_ino
== sctx
->cmp_key
->objectid
&&
4838 sctx
->cmp_key
->type
<= BTRFS_INODE_EXTREF_KEY
)
4840 if (list_empty(&sctx
->new_refs
) && list_empty(&sctx
->deleted_refs
))
4843 ret
= process_recorded_refs(sctx
, pending_move
);
4847 *refs_processed
= 1;
4852 static int finish_inode_if_needed(struct send_ctx
*sctx
, int at_end
)
4863 int pending_move
= 0;
4864 int refs_processed
= 0;
4866 ret
= process_recorded_refs_if_needed(sctx
, at_end
, &pending_move
,
4872 * We have processed the refs and thus need to advance send_progress.
4873 * Now, calls to get_cur_xxx will take the updated refs of the current
4874 * inode into account.
4876 * On the other hand, if our current inode is a directory and couldn't
4877 * be moved/renamed because its parent was renamed/moved too and it has
4878 * a higher inode number, we can only move/rename our current inode
4879 * after we moved/renamed its parent. Therefore in this case operate on
4880 * the old path (pre move/rename) of our current inode, and the
4881 * move/rename will be performed later.
4883 if (refs_processed
&& !pending_move
)
4884 sctx
->send_progress
= sctx
->cur_ino
+ 1;
4886 if (sctx
->cur_ino
== 0 || sctx
->cur_inode_deleted
)
4888 if (!at_end
&& sctx
->cmp_key
->objectid
== sctx
->cur_ino
)
4891 ret
= get_inode_info(sctx
->send_root
, sctx
->cur_ino
, NULL
, NULL
,
4892 &left_mode
, &left_uid
, &left_gid
, NULL
);
4896 if (!sctx
->parent_root
|| sctx
->cur_inode_new
) {
4898 if (!S_ISLNK(sctx
->cur_inode_mode
))
4901 ret
= get_inode_info(sctx
->parent_root
, sctx
->cur_ino
,
4902 NULL
, NULL
, &right_mode
, &right_uid
,
4907 if (left_uid
!= right_uid
|| left_gid
!= right_gid
)
4909 if (!S_ISLNK(sctx
->cur_inode_mode
) && left_mode
!= right_mode
)
4913 if (S_ISREG(sctx
->cur_inode_mode
)) {
4914 if (need_send_hole(sctx
)) {
4915 if (sctx
->cur_inode_last_extent
== (u64
)-1) {
4916 ret
= get_last_extent(sctx
, (u64
)-1);
4920 if (sctx
->cur_inode_last_extent
<
4921 sctx
->cur_inode_size
) {
4922 ret
= send_hole(sctx
, sctx
->cur_inode_size
);
4927 ret
= send_truncate(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
4928 sctx
->cur_inode_size
);
4934 ret
= send_chown(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
4935 left_uid
, left_gid
);
4940 ret
= send_chmod(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
4947 * If other directory inodes depended on our current directory
4948 * inode's move/rename, now do their move/rename operations.
4950 if (!is_waiting_for_move(sctx
, sctx
->cur_ino
)) {
4951 ret
= apply_children_dir_moves(sctx
);
4955 * Need to send that every time, no matter if it actually
4956 * changed between the two trees as we have done changes to
4957 * the inode before. If our inode is a directory and it's
4958 * waiting to be moved/renamed, we will send its utimes when
4959 * it's moved/renamed, therefore we don't need to do it here.
4961 sctx
->send_progress
= sctx
->cur_ino
+ 1;
4962 ret
= send_utimes(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
);
4971 static int changed_inode(struct send_ctx
*sctx
,
4972 enum btrfs_compare_tree_result result
)
4975 struct btrfs_key
*key
= sctx
->cmp_key
;
4976 struct btrfs_inode_item
*left_ii
= NULL
;
4977 struct btrfs_inode_item
*right_ii
= NULL
;
4981 sctx
->cur_ino
= key
->objectid
;
4982 sctx
->cur_inode_new_gen
= 0;
4983 sctx
->cur_inode_last_extent
= (u64
)-1;
4986 * Set send_progress to current inode. This will tell all get_cur_xxx
4987 * functions that the current inode's refs are not updated yet. Later,
4988 * when process_recorded_refs is finished, it is set to cur_ino + 1.
4990 sctx
->send_progress
= sctx
->cur_ino
;
4992 if (result
== BTRFS_COMPARE_TREE_NEW
||
4993 result
== BTRFS_COMPARE_TREE_CHANGED
) {
4994 left_ii
= btrfs_item_ptr(sctx
->left_path
->nodes
[0],
4995 sctx
->left_path
->slots
[0],
4996 struct btrfs_inode_item
);
4997 left_gen
= btrfs_inode_generation(sctx
->left_path
->nodes
[0],
5000 right_ii
= btrfs_item_ptr(sctx
->right_path
->nodes
[0],
5001 sctx
->right_path
->slots
[0],
5002 struct btrfs_inode_item
);
5003 right_gen
= btrfs_inode_generation(sctx
->right_path
->nodes
[0],
5006 if (result
== BTRFS_COMPARE_TREE_CHANGED
) {
5007 right_ii
= btrfs_item_ptr(sctx
->right_path
->nodes
[0],
5008 sctx
->right_path
->slots
[0],
5009 struct btrfs_inode_item
);
5011 right_gen
= btrfs_inode_generation(sctx
->right_path
->nodes
[0],
5015 * The cur_ino = root dir case is special here. We can't treat
5016 * the inode as deleted+reused because it would generate a
5017 * stream that tries to delete/mkdir the root dir.
5019 if (left_gen
!= right_gen
&&
5020 sctx
->cur_ino
!= BTRFS_FIRST_FREE_OBJECTID
)
5021 sctx
->cur_inode_new_gen
= 1;
5024 if (result
== BTRFS_COMPARE_TREE_NEW
) {
5025 sctx
->cur_inode_gen
= left_gen
;
5026 sctx
->cur_inode_new
= 1;
5027 sctx
->cur_inode_deleted
= 0;
5028 sctx
->cur_inode_size
= btrfs_inode_size(
5029 sctx
->left_path
->nodes
[0], left_ii
);
5030 sctx
->cur_inode_mode
= btrfs_inode_mode(
5031 sctx
->left_path
->nodes
[0], left_ii
);
5032 sctx
->cur_inode_rdev
= btrfs_inode_rdev(
5033 sctx
->left_path
->nodes
[0], left_ii
);
5034 if (sctx
->cur_ino
!= BTRFS_FIRST_FREE_OBJECTID
)
5035 ret
= send_create_inode_if_needed(sctx
);
5036 } else if (result
== BTRFS_COMPARE_TREE_DELETED
) {
5037 sctx
->cur_inode_gen
= right_gen
;
5038 sctx
->cur_inode_new
= 0;
5039 sctx
->cur_inode_deleted
= 1;
5040 sctx
->cur_inode_size
= btrfs_inode_size(
5041 sctx
->right_path
->nodes
[0], right_ii
);
5042 sctx
->cur_inode_mode
= btrfs_inode_mode(
5043 sctx
->right_path
->nodes
[0], right_ii
);
5044 } else if (result
== BTRFS_COMPARE_TREE_CHANGED
) {
5046 * We need to do some special handling in case the inode was
5047 * reported as changed with a changed generation number. This
5048 * means that the original inode was deleted and new inode
5049 * reused the same inum. So we have to treat the old inode as
5050 * deleted and the new one as new.
5052 if (sctx
->cur_inode_new_gen
) {
5054 * First, process the inode as if it was deleted.
5056 sctx
->cur_inode_gen
= right_gen
;
5057 sctx
->cur_inode_new
= 0;
5058 sctx
->cur_inode_deleted
= 1;
5059 sctx
->cur_inode_size
= btrfs_inode_size(
5060 sctx
->right_path
->nodes
[0], right_ii
);
5061 sctx
->cur_inode_mode
= btrfs_inode_mode(
5062 sctx
->right_path
->nodes
[0], right_ii
);
5063 ret
= process_all_refs(sctx
,
5064 BTRFS_COMPARE_TREE_DELETED
);
5069 * Now process the inode as if it was new.
5071 sctx
->cur_inode_gen
= left_gen
;
5072 sctx
->cur_inode_new
= 1;
5073 sctx
->cur_inode_deleted
= 0;
5074 sctx
->cur_inode_size
= btrfs_inode_size(
5075 sctx
->left_path
->nodes
[0], left_ii
);
5076 sctx
->cur_inode_mode
= btrfs_inode_mode(
5077 sctx
->left_path
->nodes
[0], left_ii
);
5078 sctx
->cur_inode_rdev
= btrfs_inode_rdev(
5079 sctx
->left_path
->nodes
[0], left_ii
);
5080 ret
= send_create_inode_if_needed(sctx
);
5084 ret
= process_all_refs(sctx
, BTRFS_COMPARE_TREE_NEW
);
5088 * Advance send_progress now as we did not get into
5089 * process_recorded_refs_if_needed in the new_gen case.
5091 sctx
->send_progress
= sctx
->cur_ino
+ 1;
5094 * Now process all extents and xattrs of the inode as if
5095 * they were all new.
5097 ret
= process_all_extents(sctx
);
5100 ret
= process_all_new_xattrs(sctx
);
5104 sctx
->cur_inode_gen
= left_gen
;
5105 sctx
->cur_inode_new
= 0;
5106 sctx
->cur_inode_new_gen
= 0;
5107 sctx
->cur_inode_deleted
= 0;
5108 sctx
->cur_inode_size
= btrfs_inode_size(
5109 sctx
->left_path
->nodes
[0], left_ii
);
5110 sctx
->cur_inode_mode
= btrfs_inode_mode(
5111 sctx
->left_path
->nodes
[0], left_ii
);
/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode did already initiate processing of refs. The reason for this is
 * that in this case, compare_tree actually compares the refs of 2 different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
 * refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					     sctx->cmp_key);
	}

	return ret;
}
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}
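/*
 * A ref item can compare as equal in both trees even though a parent
 * directory was deleted and recreated with the same name (and thus a new
 * generation). dir_changed() above and compare_refs() below detect that case
 * so changed_cb() still reprocesses the ref instead of skipping it.
 */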
5212 static int compare_refs(struct send_ctx
*sctx
, struct btrfs_path
*path
,
5213 struct btrfs_key
*key
)
5215 struct btrfs_inode_extref
*extref
;
5216 struct extent_buffer
*leaf
;
5217 u64 dirid
= 0, last_dirid
= 0;
5224 /* Easy case, just check this one dirid */
5225 if (key
->type
== BTRFS_INODE_REF_KEY
) {
5226 dirid
= key
->offset
;
5228 ret
= dir_changed(sctx
, dirid
);
5232 leaf
= path
->nodes
[0];
5233 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
5234 ptr
= btrfs_item_ptr_offset(leaf
, path
->slots
[0]);
5235 while (cur_offset
< item_size
) {
5236 extref
= (struct btrfs_inode_extref
*)(ptr
+
5238 dirid
= btrfs_inode_extref_parent(leaf
, extref
);
5239 ref_name_len
= btrfs_inode_extref_name_len(leaf
, extref
);
5240 cur_offset
+= ref_name_len
+ sizeof(*extref
);
5241 if (dirid
== last_dirid
)
5243 ret
= dir_changed(sctx
, dirid
);
5253 * Updates compare related fields in sctx and simply forwards to the actual
5254 * changed_xxx functions.
5256 static int changed_cb(struct btrfs_root
*left_root
,
5257 struct btrfs_root
*right_root
,
5258 struct btrfs_path
*left_path
,
5259 struct btrfs_path
*right_path
,
5260 struct btrfs_key
*key
,
5261 enum btrfs_compare_tree_result result
,
5265 struct send_ctx
*sctx
= ctx
;
5267 if (result
== BTRFS_COMPARE_TREE_SAME
) {
5268 if (key
->type
== BTRFS_INODE_REF_KEY
||
5269 key
->type
== BTRFS_INODE_EXTREF_KEY
) {
5270 ret
= compare_refs(sctx
, left_path
, key
);
5275 } else if (key
->type
== BTRFS_EXTENT_DATA_KEY
) {
5276 return maybe_send_hole(sctx
, left_path
, key
);
5280 result
= BTRFS_COMPARE_TREE_CHANGED
;
5284 sctx
->left_path
= left_path
;
5285 sctx
->right_path
= right_path
;
5286 sctx
->cmp_key
= key
;
5288 ret
= finish_inode_if_needed(sctx
, 0);
5292 /* Ignore non-FS objects */
5293 if (key
->objectid
== BTRFS_FREE_INO_OBJECTID
||
5294 key
->objectid
== BTRFS_FREE_SPACE_OBJECTID
)
5297 if (key
->type
== BTRFS_INODE_ITEM_KEY
)
5298 ret
= changed_inode(sctx
, result
);
5299 else if (key
->type
== BTRFS_INODE_REF_KEY
||
5300 key
->type
== BTRFS_INODE_EXTREF_KEY
)
5301 ret
= changed_ref(sctx
, result
);
5302 else if (key
->type
== BTRFS_XATTR_ITEM_KEY
)
5303 ret
= changed_xattr(sctx
, result
);
5304 else if (key
->type
== BTRFS_EXTENT_DATA_KEY
)
5305 ret
= changed_extent(sctx
, result
);
5311 static int full_send_tree(struct send_ctx
*sctx
)
5314 struct btrfs_trans_handle
*trans
= NULL
;
5315 struct btrfs_root
*send_root
= sctx
->send_root
;
5316 struct btrfs_key key
;
5317 struct btrfs_key found_key
;
5318 struct btrfs_path
*path
;
5319 struct extent_buffer
*eb
;
5324 path
= alloc_path_for_send();
5328 spin_lock(&send_root
->root_item_lock
);
5329 start_ctransid
= btrfs_root_ctransid(&send_root
->root_item
);
5330 spin_unlock(&send_root
->root_item_lock
);
5332 key
.objectid
= BTRFS_FIRST_FREE_OBJECTID
;
5333 key
.type
= BTRFS_INODE_ITEM_KEY
;
5338 * We need to make sure the transaction does not get committed
5339 * while we do anything on commit roots. Join a transaction to prevent
5342 trans
= btrfs_join_transaction(send_root
);
5343 if (IS_ERR(trans
)) {
5344 ret
= PTR_ERR(trans
);
5350 * Make sure the tree has not changed after re-joining. We detect this
5351 * by comparing start_ctransid and ctransid. They should always match.
5353 spin_lock(&send_root
->root_item_lock
);
5354 ctransid
= btrfs_root_ctransid(&send_root
->root_item
);
5355 spin_unlock(&send_root
->root_item_lock
);
5357 if (ctransid
!= start_ctransid
) {
5358 WARN(1, KERN_WARNING
"BTRFS: the root that you're trying to "
5359 "send was modified in between. This is "
5360 "probably a bug.\n");
5365 ret
= btrfs_search_slot_for_read(send_root
, &key
, path
, 1, 0);
5373 * When someone want to commit while we iterate, end the
5374 * joined transaction and rejoin.
5376 if (btrfs_should_end_transaction(trans
, send_root
)) {
5377 ret
= btrfs_end_transaction(trans
, send_root
);
5381 btrfs_release_path(path
);
5385 eb
= path
->nodes
[0];
5386 slot
= path
->slots
[0];
5387 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
5389 ret
= changed_cb(send_root
, NULL
, path
, NULL
,
5390 &found_key
, BTRFS_COMPARE_TREE_NEW
, sctx
);
5394 key
.objectid
= found_key
.objectid
;
5395 key
.type
= found_key
.type
;
5396 key
.offset
= found_key
.offset
+ 1;
5398 ret
= btrfs_next_item(send_root
, path
);
5408 ret
= finish_inode_if_needed(sctx
, 1);
5411 btrfs_free_path(path
);
5414 ret
= btrfs_end_transaction(trans
, send_root
);
5416 btrfs_end_transaction(trans
, send_root
);
static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
					  changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}
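/*
 * Stream layout produced here: an optional stream header, a subvol/snapshot
 * begin command, the per-inode commands emitted while comparing the trees (or
 * while walking the whole send root), and finally an END command appended by
 * btrfs_ioctl_send() unless BTRFS_SEND_FLAG_OMIT_END_CMD was requested.
 */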
static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			"send_in_progres unbalanced %d root %llu\n",
			root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}
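/*
 * Entry point for the send ioctl: it pins the (read-only) source subvolume
 * and any clone sources via send_in_progress, copies the ioctl arguments and
 * clone source list from user space, allocates the send/read buffers, runs
 * send_subvol() and then tears everything down again.
 */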
5469 long btrfs_ioctl_send(struct file
*mnt_file
, void __user
*arg_
)
5472 struct btrfs_root
*send_root
;
5473 struct btrfs_root
*clone_root
;
5474 struct btrfs_fs_info
*fs_info
;
5475 struct btrfs_ioctl_send_args
*arg
= NULL
;
5476 struct btrfs_key key
;
5477 struct send_ctx
*sctx
= NULL
;
5479 u64
*clone_sources_tmp
= NULL
;
5480 int clone_sources_to_rollback
= 0;
5481 int sort_clone_roots
= 0;
5484 if (!capable(CAP_SYS_ADMIN
))
5487 send_root
= BTRFS_I(file_inode(mnt_file
))->root
;
5488 fs_info
= send_root
->fs_info
;
5491 * The subvolume must remain read-only during send, protect against
5494 spin_lock(&send_root
->root_item_lock
);
5495 send_root
->send_in_progress
++;
5496 spin_unlock(&send_root
->root_item_lock
);
5499 * This is done when we lookup the root, it should already be complete
5500 * by the time we get here.
5502 WARN_ON(send_root
->orphan_cleanup_state
!= ORPHAN_CLEANUP_DONE
);
5505 * Userspace tools do the checks and warn the user if it's
5508 if (!btrfs_root_readonly(send_root
)) {
5513 arg
= memdup_user(arg_
, sizeof(*arg
));
5520 if (!access_ok(VERIFY_READ
, arg
->clone_sources
,
5521 sizeof(*arg
->clone_sources
) *
5522 arg
->clone_sources_count
)) {
5527 if (arg
->flags
& ~BTRFS_SEND_FLAG_MASK
) {
5532 sctx
= kzalloc(sizeof(struct send_ctx
), GFP_NOFS
);
5538 INIT_LIST_HEAD(&sctx
->new_refs
);
5539 INIT_LIST_HEAD(&sctx
->deleted_refs
);
5540 INIT_RADIX_TREE(&sctx
->name_cache
, GFP_NOFS
);
5541 INIT_LIST_HEAD(&sctx
->name_cache_list
);
5543 sctx
->flags
= arg
->flags
;
5545 sctx
->send_filp
= fget(arg
->send_fd
);
5546 if (!sctx
->send_filp
) {
5551 sctx
->send_root
= send_root
;
5552 sctx
->clone_roots_cnt
= arg
->clone_sources_count
;
5554 sctx
->send_max_size
= BTRFS_SEND_BUF_SIZE
;
5555 sctx
->send_buf
= vmalloc(sctx
->send_max_size
);
5556 if (!sctx
->send_buf
) {
5561 sctx
->read_buf
= vmalloc(BTRFS_SEND_READ_SIZE
);
5562 if (!sctx
->read_buf
) {
5567 sctx
->pending_dir_moves
= RB_ROOT
;
5568 sctx
->waiting_dir_moves
= RB_ROOT
;
5569 sctx
->orphan_dirs
= RB_ROOT
;
5571 sctx
->clone_roots
= vzalloc(sizeof(struct clone_root
) *
5572 (arg
->clone_sources_count
+ 1));
5573 if (!sctx
->clone_roots
) {
5578 if (arg
->clone_sources_count
) {
5579 clone_sources_tmp
= vmalloc(arg
->clone_sources_count
*
5580 sizeof(*arg
->clone_sources
));
5581 if (!clone_sources_tmp
) {
5586 ret
= copy_from_user(clone_sources_tmp
, arg
->clone_sources
,
5587 arg
->clone_sources_count
*
5588 sizeof(*arg
->clone_sources
));
5594 for (i
= 0; i
< arg
->clone_sources_count
; i
++) {
5595 key
.objectid
= clone_sources_tmp
[i
];
5596 key
.type
= BTRFS_ROOT_ITEM_KEY
;
5597 key
.offset
= (u64
)-1;
5599 index
= srcu_read_lock(&fs_info
->subvol_srcu
);
5601 clone_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
5602 if (IS_ERR(clone_root
)) {
5603 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5604 ret
= PTR_ERR(clone_root
);
5607 clone_sources_to_rollback
= i
+ 1;
5608 spin_lock(&clone_root
->root_item_lock
);
5609 clone_root
->send_in_progress
++;
5610 if (!btrfs_root_readonly(clone_root
)) {
5611 spin_unlock(&clone_root
->root_item_lock
);
5612 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5616 spin_unlock(&clone_root
->root_item_lock
);
5617 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5619 sctx
->clone_roots
[i
].root
= clone_root
;
5621 vfree(clone_sources_tmp
);
5622 clone_sources_tmp
= NULL
;
5625 if (arg
->parent_root
) {
5626 key
.objectid
= arg
->parent_root
;
5627 key
.type
= BTRFS_ROOT_ITEM_KEY
;
5628 key
.offset
= (u64
)-1;
5630 index
= srcu_read_lock(&fs_info
->subvol_srcu
);
5632 sctx
->parent_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
5633 if (IS_ERR(sctx
->parent_root
)) {
5634 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5635 ret
= PTR_ERR(sctx
->parent_root
);
5639 spin_lock(&sctx
->parent_root
->root_item_lock
);
5640 sctx
->parent_root
->send_in_progress
++;
5641 if (!btrfs_root_readonly(sctx
->parent_root
)) {
5642 spin_unlock(&sctx
->parent_root
->root_item_lock
);
5643 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5647 spin_unlock(&sctx
->parent_root
->root_item_lock
);
5649 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5653 * Clones from send_root are allowed, but only if the clone source
5654 * is behind the current send position. This is checked while searching
5655 * for possible clone sources.
5657 sctx
->clone_roots
[sctx
->clone_roots_cnt
++].root
= sctx
->send_root
;
5659 /* We do a bsearch later */
5660 sort(sctx
->clone_roots
, sctx
->clone_roots_cnt
,
5661 sizeof(*sctx
->clone_roots
), __clone_root_cmp_sort
,
5663 sort_clone_roots
= 1;
5665 ret
= send_subvol(sctx
);
5669 if (!(sctx
->flags
& BTRFS_SEND_FLAG_OMIT_END_CMD
)) {
5670 ret
= begin_cmd(sctx
, BTRFS_SEND_C_END
);
5673 ret
= send_cmd(sctx
);
5679 WARN_ON(sctx
&& !ret
&& !RB_EMPTY_ROOT(&sctx
->pending_dir_moves
));
5680 while (sctx
&& !RB_EMPTY_ROOT(&sctx
->pending_dir_moves
)) {
5682 struct pending_dir_move
*pm
;
5684 n
= rb_first(&sctx
->pending_dir_moves
);
5685 pm
= rb_entry(n
, struct pending_dir_move
, node
);
5686 while (!list_empty(&pm
->list
)) {
5687 struct pending_dir_move
*pm2
;
5689 pm2
= list_first_entry(&pm
->list
,
5690 struct pending_dir_move
, list
);
5691 free_pending_move(sctx
, pm2
);
5693 free_pending_move(sctx
, pm
);
5696 WARN_ON(sctx
&& !ret
&& !RB_EMPTY_ROOT(&sctx
->waiting_dir_moves
));
5697 while (sctx
&& !RB_EMPTY_ROOT(&sctx
->waiting_dir_moves
)) {
5699 struct waiting_dir_move
*dm
;
5701 n
= rb_first(&sctx
->waiting_dir_moves
);
5702 dm
= rb_entry(n
, struct waiting_dir_move
, node
);
5703 rb_erase(&dm
->node
, &sctx
->waiting_dir_moves
);
5707 WARN_ON(sctx
&& !ret
&& !RB_EMPTY_ROOT(&sctx
->orphan_dirs
));
5708 while (sctx
&& !RB_EMPTY_ROOT(&sctx
->orphan_dirs
)) {
5710 struct orphan_dir_info
*odi
;
5712 n
= rb_first(&sctx
->orphan_dirs
);
5713 odi
= rb_entry(n
, struct orphan_dir_info
, node
);
5714 free_orphan_dir_info(sctx
, odi
);
5717 if (sort_clone_roots
) {
5718 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++)
5719 btrfs_root_dec_send_in_progress(
5720 sctx
->clone_roots
[i
].root
);
5722 for (i
= 0; sctx
&& i
< clone_sources_to_rollback
; i
++)
5723 btrfs_root_dec_send_in_progress(
5724 sctx
->clone_roots
[i
].root
);
5726 btrfs_root_dec_send_in_progress(send_root
);
5728 if (sctx
&& !IS_ERR_OR_NULL(sctx
->parent_root
))
5729 btrfs_root_dec_send_in_progress(sctx
->parent_root
);
5732 vfree(clone_sources_tmp
);
5735 if (sctx
->send_filp
)
5736 fput(sctx
->send_filp
);
5738 vfree(sctx
->clone_roots
);
5739 vfree(sctx
->send_buf
);
5740 vfree(sctx
->read_buf
);
5742 name_cache_free(sctx
);