/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "send.h"
#include "backref.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
static int g_verbose = 0;

#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and a higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
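/*
 * Editor's illustration (not part of the original source): how a reversed
 * fs_path grows. Components are prepended, so the string is built backwards
 * from the end of the buffer and finished with fs_path_unreverse():
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *	fs_path_add(p, "c", 1);		// buf: [.......c]  start -> "c"
 *	fs_path_add(p, "b", 1);		// buf: [.....b/c]  start -> "b/c"
 *	fs_path_add(p, "a", 1);		// buf: [...a/b/c]  start -> "a/b/c"
 *	fs_path_unreverse(p);		// moves "a/b/c" to the front of buf
 */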
/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};
#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	/*
	 * We process inodes by their increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with the lower inode number when we finish processing
	 * it - we must process the directory with the higher inode number
	 * first, then rename/move it and then rename/move the directory with
	 * the lower inode number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 * (tree diagram omitted in this excerpt)
	 *
	 * Tree state when the second (incremental) send is performed:
	 * (tree diagram omitted in this excerpt)
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has the lower inode number, but we can't move it (2nd mv
	 * operation) before we move "d", which has the higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 * (tree diagram omitted in this excerpt)
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/c/x /a/b/YY
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};
struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	bool is_orphan;
	struct list_head update_refs;
};
struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
};
struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
};
struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}
static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}
static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_NOFS);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}
static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}
static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}
static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}
static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_NOFS);
		if (!tmp_buf)
			return -ENOMEM;
		memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_NOFS);
		if (!tmp_buf)
			return -ENOMEM;
	}
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger, this will let the fast path
	 * happen most of the time
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}
static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}
static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}
static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}
static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}
static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}
static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}
static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	mm_segment_t old_fs;
	u32 pos = 0;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	while (pos < len) {
		ret = vfs_write(filp, (__force const char __user *)buf + pos,
				len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			goto out;
		if (ret == 0) {
			ret = -EIO;
			goto out;
		}
		pos += ret;
	}

	ret = 0;

out:
	set_fs(old_fs);
	return ret;
}
static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,	 	\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)
static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}
static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}
static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}
#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
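/*
 * Editor's illustration (not part of the original source): each attribute is
 * serialized into send_buf as a little-endian type/length header followed by
 * the raw payload:
 *
 *	+----------------+---------------+=================+
 *	| tlv_type (u16) | tlv_len (u16) | data (tlv_len)  |
 *	+----------------+---------------+=================+
 *
 * e.g. TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, 4096) appends a 4 byte header
 * plus an 8 byte little-endian payload.
 */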
static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
					&sctx->send_off);
}
/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}
static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
					&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		     struct fs_path *from, struct fs_path *to)
{
	int ret;

verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	int ret;

verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			  u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			  u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}
static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}
typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}

	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}
typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_NOFS | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = vmalloc(buf_len);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
				data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}
static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}
/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}
{
1145 struct send_ctx
*sctx
;
1147 struct btrfs_path
*path
;
1148 /* number of total found references */
1152 * used for clones found in send_root. clones found behind cur_objectid
1153 * and cur_offset are not considered as allowed clones.
1158 /* may be truncated in case it's the last extent in a file */
1161 /* data offset in the file extent item */
1164 /* Just to check for bugs in backref resolving */
static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}
static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
		return -1;
	if (cr1->root->objectid > cr2->root->objectid)
		return 1;
	return 0;
}
/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL,
			       NULL, NULL, NULL, NULL);
	btrfs_release_path(bctx->path);
	if (ret < 0)
		return ret;

	if (offset + bctx->data_offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
#if 0
		if (ino > bctx->cur_objectid)
			return 0;
		if (offset + bctx->extent_len > bctx->cur_offset)
			return 0;
#endif
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}
/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means, that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
	if (!backref_ctx) {
		ret = -ENOMEM;
		goto out;
	}

	backref_ctx->path = tmp_path;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&sctx->send_root->fs_info->commit_root_sem);
	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&sctx->send_root->fs_info->commit_root_sem);
	btrfs_release_path(tmp_path);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;
	/*
	 * For non-compressed extents iterate_extent_inodes() gives us extent
	 * offsets that already take into account the data offset, but not for
	 * compressed extents, since the offset is logical and not relative to
	 * the physical extent locations. We must take this into account to
	 * avoid sending clone offsets that go beyond the source file's size,
	 * which would result in the clone ioctl failing with -EINVAL on the
	 * receiving end.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		backref_ctx->data_offset = 0;
	else
		backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(sctx->send_root->fs_info,
					found_key.objectid, extent_item_pos, 1,
					__iterate_backrefs, backref_ctx);

	if (ret < 0)
		goto out;

	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(sctx->send_root->fs_info, "did not find backref in "
				"send_root. inode=%llu, offset=%llu, "
				"disk_byte=%llu found extent=%llu",
				ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
		"ino=%llu, "
		"num_bytes=%llu, logical=%llu\n",
		data_offset, ino, num_bytes, logical);

	if (!backref_ctx->found)
		verbose_printk("btrfs: no clones found\n");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}
	}

	if (cur_clone_root) {
		if (compressed != BTRFS_COMPRESS_NONE) {
			/*
			 * Offsets given by iterate_extent_inodes() are relative
			 * to the start of the extent, we need to add logical
			 * offset from the file extent item.
			 * (See why at backref.c:check_extent_in_eb())
			 */
			cur_clone_root->offset += btrfs_file_extent_offset(eb,
									   fi);
		}

		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	kfree(backref_ctx);
	return ret;
}
static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret);

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
				ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}
enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};
static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}
static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}
/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	if (!di) {
		ret = -ENOENT;
		goto out;
	}
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent
 * dir, the generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}
*root
,
1774 const char *name
, int name_len
)
1777 struct fs_path
*tmp_name
;
1780 tmp_name
= fs_path_alloc();
1784 ret
= get_first_ref(root
, ino
, &tmp_dir
, NULL
, tmp_name
);
1788 if (dir
!= tmp_dir
|| name_len
!= fs_path_len(tmp_name
)) {
1793 ret
= !memcmp(tmp_name
->start
, name
, name_len
);
1796 fs_path_free(tmp_name
);
/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}
/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			    u64 dir, u64 dir_gen,
			    u64 ino, u64 ino_gen,
			    const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/* we know that it is or will be overwritten. check this now */
	if (ow_inode < sctx->send_progress)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}
/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an
 * inode that got overwritten. This is used by process_recorded_refs to
 * determine if it has to use the path as returned by get_cur_path or the
 * orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}
/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}
static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}
*name_cache_search(struct send_ctx
*sctx
,
2025 struct list_head
*nce_head
;
2026 struct name_cache_entry
*cur
;
2028 nce_head
= radix_tree_lookup(&sctx
->name_cache
, (unsigned long)ino
);
2032 list_for_each_entry(cur
, nce_head
, radix_list
) {
2033 if (cur
->ino
== ino
&& cur
->gen
== gen
)
/*
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
}
/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}
static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}
/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode is not existent yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * record_new_ref
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	return ret;
}
/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inodes "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    the orphan inode.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;

	name = fs_path_alloc();
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino)) {
			ret = gen_unique_name(sctx, ino, gen, name);
			if (ret < 0)
				goto out;
			ret = fs_path_add_path(dest, name);
			break;
		}

		if (is_waiting_for_move(sctx, ino)) {
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret)
				stop = 1;
		}

		if (ret < 0)
			goto out;

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}
/*
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
 */
static int send_subvol_begin(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	char *name = NULL;
	int namelen;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_NOFS);
	if (!name) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	key.objectid = send_root->objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
				&key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->objectid) {
		ret = -ENOENT;
		goto out;
	}
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);

	if (parent_root) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
		if (ret < 0)
			goto out;
	} else {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
		if (ret < 0)
			goto out;
	}

	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);
	TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			sctx->send_root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
		    le64_to_cpu(sctx->send_root->root_item.ctransid));
	if (parent_root) {
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				sctx->parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
	}

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	btrfs_free_path(path);
	kfree(name);
	return ret;
}
static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *p = NULL;
	struct btrfs_inode_item *ii;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int slot;

verbose_printk("btrfs: send_utimes %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	slot = path->slots[0];
	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
	/* TODO Add otime support when the otime patches get into upstream */

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	btrfs_free_path(path);
	return ret;
}
/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as orphan.
 */
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
	int ret = 0;
	struct fs_path *p;
	int cmd;
	u64 gen;
	u64 mode;
	u64 rdev;

verbose_printk("btrfs: send_create_inode %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	if (ino != sctx->cur_ino) {
		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
				     NULL, NULL, &rdev);
		if (ret < 0)
			goto out;
	} else {
		gen = sctx->cur_inode_gen;
		mode = sctx->cur_inode_mode;
		rdev = sctx->cur_inode_rdev;
	}

	if (S_ISREG(mode)) {
		cmd = BTRFS_SEND_C_MKFILE;
	} else if (S_ISDIR(mode)) {
		cmd = BTRFS_SEND_C_MKDIR;
	} else if (S_ISLNK(mode)) {
		cmd = BTRFS_SEND_C_SYMLINK;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		cmd = BTRFS_SEND_C_MKNOD;
	} else if (S_ISFIFO(mode)) {
		cmd = BTRFS_SEND_C_MKFIFO;
	} else if (S_ISSOCK(mode)) {
		cmd = BTRFS_SEND_C_MKSOCK;
	} else {
		printk(KERN_WARNING "btrfs: unexpected inode type %o",
				(int)(mode & S_IFMT));
		ret = -ENOTSUPP;
		goto out;
	}

	ret = begin_cmd(sctx, cmd);
	if (ret < 0)
		goto out;

	ret = gen_unique_name(sctx, ino, gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);

	if (S_ISLNK(mode)) {
		fs_path_reset(p);
		ret = read_symlink(sctx->send_root, ino, p);
		if (ret < 0)
			goto out;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
	}

	ret = send_cmd(sctx);
	if (ret < 0)
		goto out;

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
/*
 * We need some special handling for inodes that get processed before the
 * parent directory got created. See process_recorded_refs for details.
 * This function does the check if we already created the dir out of order.
 */
static int did_create_dir(struct send_ctx *sctx, u64 dir)
{
	int ret = 0;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key di_key;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	int slot;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(sctx->send_root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
		    di_key.objectid < sctx->send_progress) {
			ret = 1;
			goto out;
		}

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Only creates the inode if it is:
 * 1. Not a directory
 * 2. Or a directory which was not created already due to out of order
 *    directories. See did_create_dir and process_recorded_refs for details.
 */
static int send_create_inode_if_needed(struct send_ctx *sctx)
{
	int ret;

	if (S_ISDIR(sctx->cur_inode_mode)) {
		ret = did_create_dir(sctx, sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
	}

	ret = send_create_inode(sctx, sctx->cur_ino);
	if (ret < 0)
		goto out;

out:
	return ret;
}
struct recorded_ref {
	struct list_head list;
	char *dir_path;
	char *name;
	struct fs_path *full_path;
	u64 dir;
	u64 dir_gen;
	int dir_path_len;
	int name_len;
};
/*
 * We need to process new refs before deleted refs, but compare_tree gives us
 * everything mixed. So we first record all refs and later process them.
 * This function is a helper to record one ref.
 */
static int __record_ref(struct list_head *head, u64 dir,
		      u64 dir_gen, struct fs_path *path)
{
	struct recorded_ref *ref;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	ref->dir = dir;
	ref->dir_gen = dir_gen;
	ref->full_path = path;

	ref->name = (char *)kbasename(ref->full_path->start);
	ref->name_len = ref->full_path->end - ref->name;
	ref->dir_path = ref->full_path->start;
	if (ref->name == ref->full_path->start)
		ref->dir_path_len = 0;
	else
		ref->dir_path_len = ref->full_path->end -
				ref->full_path->start - 1 - ref->name_len;

	list_add_tail(&ref->list, head);
	return 0;
}
*ref
, struct list_head
*list
)
2728 struct recorded_ref
*new;
2730 new = kmalloc(sizeof(*ref
), GFP_NOFS
);
2734 new->dir
= ref
->dir
;
2735 new->dir_gen
= ref
->dir_gen
;
2736 new->full_path
= NULL
;
2737 INIT_LIST_HEAD(&new->list
);
2738 list_add_tail(&new->list
, list
);
static void __free_recorded_refs(struct list_head *head)
{
	struct recorded_ref *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct recorded_ref, list);
		fs_path_free(cur->full_path);
		list_del(&cur->list);
		kfree(cur);
	}
}
*sctx
)
2756 __free_recorded_refs(&sctx
->new_refs
);
2757 __free_recorded_refs(&sctx
->deleted_refs
);
/*
 * Renames/moves a file/dir to its orphan name. Used when the first
 * ref of an unprocessed inode gets overwritten and for all non empty
 * directories.
 */
static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen,
			  struct fs_path *path)
{
	int ret;
	struct fs_path *orphan;

	orphan = fs_path_alloc();
	if (!orphan)
		return -ENOMEM;

	ret = gen_unique_name(sctx, ino, gen, orphan);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, path, orphan);

out:
	fs_path_free(orphan);
	return ret;
}
static struct orphan_dir_info *
add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node **p = &sctx->orphan_dirs.rb_node;
	struct rb_node *parent = NULL;
	struct orphan_dir_info *entry, *odi;

	odi = kmalloc(sizeof(*odi), GFP_NOFS);
	if (!odi)
		return ERR_PTR(-ENOMEM);
	odi->ino = dir_ino;
	odi->gen = 0;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct orphan_dir_info, node);
		if (dir_ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (dir_ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(odi);
			return entry;
		}
	}

	rb_link_node(&odi->node, parent, p);
	rb_insert_color(&odi->node, &sctx->orphan_dirs);
	return odi;
}
static struct orphan_dir_info *
get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
{
	struct rb_node *n = sctx->orphan_dirs.rb_node;
	struct orphan_dir_info *entry;

	while (n) {
		entry = rb_entry(n, struct orphan_dir_info, node);
		if (dir_ino < entry->ino)
			n = n->rb_left;
		else if (dir_ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
{
	struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);

	return odi != NULL;
}
static void free_orphan_dir_info(struct send_ctx *sctx,
				 struct orphan_dir_info *odi)
{
	if (!odi)
		return;
	rb_erase(&odi->node, &sctx->orphan_dirs);
	kfree(odi);
}
/*
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
		     u64 send_progress)
{
	int ret = 0;
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;

	/*
	 * Don't try to rmdir the top/root subvolume dir.
	 */
	if (dir == BTRFS_FIRST_FREE_OBJECTID)
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct waiting_dir_move *dm;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)
			break;

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		dm = get_waiting_dir_move(sctx, loc.objectid);
		if (dm) {
			struct orphan_dir_info *odi;

			odi = add_orphan_dir_info(sctx, dir);
			if (IS_ERR(odi)) {
				ret = PTR_ERR(odi);
				goto out;
			}
			odi->gen = dir_gen;
			dm->rmdir_ino = dir;
			ret = 0;
			goto out;
		}

		if (loc.objectid > send_progress) {
			ret = 0;
			goto out;
		}

		path->slots[0]++;
	}

	ret = 1;

out:
	btrfs_free_path(path);
	return ret;
}
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
{
	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);

	return entry != NULL;
}
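
/*
 * Insert ino into the waiting_dir_moves rb-tree. Returns -EEXIST if the
 * inode is already registered as waiting for a move.
 */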
static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct waiting_dir_move *entry, *dm;

	dm = kmalloc(sizeof(*dm), GFP_NOFS);
	if (!dm)
		return -ENOMEM;
	dm->ino = ino;
	dm->rmdir_ino = 0;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct waiting_dir_move, node);
		if (ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(dm);
			return -EEXIST;
		}
	}

	rb_link_node(&dm->node, parent, p);
	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
	return 0;
}
static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
	struct waiting_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct waiting_dir_move, node);
		if (ino < entry->ino)
			n = n->rb_left;
		else if (ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static void free_waiting_dir_move(struct send_ctx *sctx,
				  struct waiting_dir_move *dm)
{
	if (!dm)
		return;
	rb_erase(&dm->node, &sctx->waiting_dir_moves);
	kfree(dm);
}
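
/*
 * Queue a directory move/rename that must happen after the directory with
 * inode number parent_ino was processed. Pending moves are kept in an
 * rb-tree keyed by parent_ino; moves sharing the same parent are chained
 * on the list head of the existing tree node.
 */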
static int add_pending_dir_move(struct send_ctx *sctx,
				u64 ino,
				u64 ino_gen,
				u64 parent_ino,
				struct list_head *new_refs,
				struct list_head *deleted_refs,
				const bool is_orphan)
{
	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct pending_dir_move *entry = NULL, *pm;
	struct recorded_ref *cur;
	int exists = 0;
	int ret;

	pm = kmalloc(sizeof(*pm), GFP_NOFS);
	if (!pm)
		return -ENOMEM;
	pm->parent_ino = parent_ino;
	pm->ino = ino;
	pm->gen = ino_gen;
	pm->is_orphan = is_orphan;
	INIT_LIST_HEAD(&pm->list);
	INIT_LIST_HEAD(&pm->update_refs);
	RB_CLEAR_NODE(&pm->node);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino) {
			p = &(*p)->rb_left;
		} else if (parent_ino > entry->parent_ino) {
			p = &(*p)->rb_right;
		} else {
			exists = 1;
			break;
		}
	}

	list_for_each_entry(cur, deleted_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}
	list_for_each_entry(cur, new_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}

	ret = add_waiting_dir_move(sctx, pm->ino);
	if (ret)
		goto out;

	if (exists) {
		list_add_tail(&pm->list, &entry->list);
	} else {
		rb_link_node(&pm->node, parent, p);
		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
	}
	ret = 0;

out:
	if (ret) {
		__free_recorded_refs(&pm->update_refs);
		kfree(pm);
	}
	return ret;
}
static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
						      u64 parent_ino)
{
	struct rb_node *n = sctx->pending_dir_moves.rb_node;
	struct pending_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino)
			n = n->rb_left;
		else if (parent_ino > entry->parent_ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
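
/*
 * Perform a previously delayed directory move/rename and, if the directory
 * was also waiting to be removed, rmdir it once it became empty. The utimes
 * of all old and new parents are updated afterwards.
 */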
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
	struct fs_path *from_path = NULL;
	struct fs_path *to_path = NULL;
	struct fs_path *name = NULL;
	u64 orig_progress = sctx->send_progress;
	struct recorded_ref *cur;
	u64 parent_ino, parent_gen;
	struct waiting_dir_move *dm = NULL;
	u64 rmdir_ino = 0;
	int ret;

	name = fs_path_alloc();
	from_path = fs_path_alloc();
	if (!name || !from_path) {
		ret = -ENOMEM;
		goto out;
	}

	dm = get_waiting_dir_move(sctx, pm->ino);
	ASSERT(dm);
	rmdir_ino = dm->rmdir_ino;
	free_waiting_dir_move(sctx, dm);

	if (pm->is_orphan) {
		ret = gen_unique_name(sctx, pm->ino,
				      pm->gen, from_path);
	} else {
		ret = get_first_ref(sctx->parent_root, pm->ino,
				    &parent_ino, &parent_gen, name);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, parent_ino, parent_gen,
				   from_path);
		if (ret < 0)
			goto out;
		ret = fs_path_add_path(from_path, name);
	}
	if (ret < 0)
		goto out;

	sctx->send_progress = sctx->cur_ino + 1;
	fs_path_reset(name);
	to_path = name;
	name = NULL;
	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, from_path, to_path);
	if (ret < 0)
		goto out;

	if (rmdir_ino) {
		struct orphan_dir_info *odi;

		odi = get_orphan_dir_info(sctx, rmdir_ino);
		if (!odi) {
			/* already deleted */
			goto finish;
		}
		ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
		if (ret < 0)
			goto out;
		if (!ret)
			goto finish;

		name = fs_path_alloc();
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
		if (ret < 0)
			goto out;
		ret = send_rmdir(sctx, name);
		if (ret < 0)
			goto out;
		free_orphan_dir_info(sctx, odi);
	}

finish:
	ret = send_utimes(sctx, pm->ino, pm->gen);
	if (ret < 0)
		goto out;

	/*
	 * After rename/move, need to update the utimes of both new parent(s)
	 * and old parent(s).
	 */
	list_for_each_entry(cur, &pm->update_refs, list) {
		if (cur->dir == rmdir_ino)
			continue;
		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
	}

out:
	fs_path_free(name);
	fs_path_free(from_path);
	fs_path_free(to_path);
	sctx->send_progress = orig_progress;

	return ret;
}
static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
{
	if (!list_empty(&m->list))
		list_del(&m->list);
	if (!RB_EMPTY_NODE(&m->node))
		rb_erase(&m->node, &sctx->pending_dir_moves);
	__free_recorded_refs(&m->update_refs);
	kfree(m);
}
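
/*
 * Append a pending move, plus any moves that were chained to it, to the
 * tail of the processing stack.
 */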
static void tail_append_pending_moves(struct pending_dir_move *moves,
				      struct list_head *stack)
{
	if (list_empty(&moves->list)) {
		list_add_tail(&moves->list, stack);
	} else {
		LIST_HEAD(list);

		list_splice_init(&moves->list, &list);
		list_add_tail(&moves->list, stack);
		list_splice_tail(&list, stack);
	}
}
static int apply_children_dir_moves(struct send_ctx *sctx)
{
	struct pending_dir_move *pm;
	struct list_head stack;
	u64 parent_ino = sctx->cur_ino;
	int ret = 0;

	pm = get_pending_dir_moves(sctx, parent_ino);
	if (!pm)
		return 0;

	INIT_LIST_HEAD(&stack);
	tail_append_pending_moves(pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		parent_ino = pm->ino;
		ret = apply_dir_move(sctx, pm);
		free_pending_move(sctx, pm);
		if (ret)
			goto out;
		pm = get_pending_dir_moves(sctx, parent_ino);
		if (pm)
			tail_append_pending_moves(pm, &stack);
	}
	return 0;

out:
	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		free_pending_move(sctx, pm);
	}
	return ret;
}
/*
 * We might need to delay a directory rename even when no ancestor directory
 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
 * renamed. This happens when we rename a directory to the old name (the name
 * in the parent root) of some other unrelated directory that got its rename
 * delayed due to some ancestor with higher number that got renamed.
 *
 * Example:
 *
 * Parent snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 257)
 * |     |---- file                        (ino 260)
 * |
 * |---- b/                                (ino 258)
 * |---- c/                                (ino 259)
 *
 * Send snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 258)
 * |---- x/                                (ino 259)
 *       |---- y/                          (ino 257)
 *             |----- file                 (ino 260)
 *
 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
 * must issue is:
 *
 * 1 - rename 259 from 'c' to 'x'
 * 2 - rename 257 from 'a' to 'x/y'
 * 3 - rename 258 from 'b' to 'a'
 *
 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
 * be done right away and < 0 on error.
 */
static int wait_for_dest_dir_move(struct send_ctx *sctx,
				  struct recorded_ref *parent_ref,
				  const bool is_orphan)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key di_key;
	struct btrfs_dir_item *di;
	u64 left_gen;
	u64 right_gen;
	int ret = 0;

	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = parent_ref->dir;
	key.type = BTRFS_DIR_ITEM_KEY;
	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);

	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = 0;
		goto out;
	}

	di = btrfs_match_dir_item_name(sctx->parent_root, path,
				       parent_ref->name, parent_ref->name_len);
	if (!di) {
		ret = 0;
		goto out;
	}
	/*
	 * di_key.objectid has the number of the inode that has a dentry in the
	 * parent directory with the same name that sctx->cur_ino is being
	 * renamed to. We need to check if that inode is in the send root as
	 * well and if it is currently marked as an inode with a pending rename,
	 * if it is, we need to delay the rename of sctx->cur_ino as well, so
	 * that it happens after that other inode is renamed.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
	if (di_key.type != BTRFS_INODE_ITEM_KEY) {
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
			     &left_gen, NULL, NULL, NULL, NULL);
	if (ret < 0)
		goto out;
	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
			     &right_gen, NULL, NULL, NULL, NULL);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = 0;
		goto out;
	}

	/* Different inode, no need to delay the rename of sctx->cur_ino */
	if (right_gen != left_gen) {
		ret = 0;
		goto out;
	}

	if (is_waiting_for_move(sctx, di_key.objectid)) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   di_key.objectid,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}
out:
	btrfs_free_path(path);
	return ret;
}
static int wait_for_parent_move(struct send_ctx *sctx,
				struct recorded_ref *parent_ref)
{
	int ret = 0;
	u64 ino = parent_ref->dir;
	u64 parent_ino_before, parent_ino_after;
	struct fs_path *path_before = NULL;
	struct fs_path *path_after = NULL;
	int len1, len2;

	path_after = fs_path_alloc();
	path_before = fs_path_alloc();
	if (!path_after || !path_before) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Our current directory inode may not yet be renamed/moved because some
	 * ancestor (immediate or not) has to be renamed/moved first. So find if
	 * such ancestor exists and make sure our own rename/move happens after
	 * that ancestor is processed.
	 */
	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		if (is_waiting_for_move(sctx, ino)) {
			ret = 1;
			break;
		}

		fs_path_reset(path_before);
		fs_path_reset(path_after);

		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
				    NULL, path_after);
		if (ret < 0)
			goto out;
		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
				    NULL, path_before);
		if (ret < 0 && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			break;
		}

		len1 = fs_path_len(path_before);
		len2 = fs_path_len(path_after);
		if (ino > sctx->cur_ino &&
		    (parent_ino_before != parent_ino_after || len1 != len2 ||
		     memcmp(path_before->start, path_after->start, len1))) {
			ret = 1;
			break;
		}

		ino = parent_ino_after;
	}

out:
	fs_path_free(path_before);
	fs_path_free(path_after);

	if (ret == 1) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   ino,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   false);
		if (!ret)
			ret = 1;
	}

	return ret;
}
/*
 * This does all the move/link/unlink/rmdir magic.
 */
static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
{
	int ret = 0;
	struct recorded_ref *cur;
	struct recorded_ref *cur2;
	struct list_head check_dirs;
	struct fs_path *valid_path = NULL;
	u64 ow_inode = 0;
	u64 ow_gen;
	int did_overwrite = 0;
	int is_orphan = 0;
	u64 last_dir_ino_rm = 0;
	bool can_rename = true;

	verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);

	/*
	 * This should never happen as the root dir always has the same ref
	 * which is always '..'
	 */
	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
	INIT_LIST_HEAD(&check_dirs);

	valid_path = fs_path_alloc();
	if (!valid_path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would look like while receiving
	 * at this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
	if (!sctx->cur_inode_new) {
		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
					      sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
		if (ret)
			did_overwrite = 1;
	}
	if (sctx->cur_inode_new || did_overwrite) {
		ret = gen_unique_name(sctx, sctx->cur_ino,
				      sctx->cur_inode_gen, valid_path);
		if (ret < 0)
			goto out;
		is_orphan = 1;
	} else {
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				   valid_path);
		if (ret < 0)
			goto out;
	}

	list_for_each_entry(cur, &sctx->new_refs, list) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directories inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		 */
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
		if (ret == inode_state_will_create) {
			ret = 0;
			/*
			 * First check if any of the current inodes refs did
			 * already create the dir.
			 */
			list_for_each_entry(cur2, &sctx->new_refs, list) {
				if (cur == cur2)
					break;
				if (cur2->dir == cur->dir) {
					ret = 1;
					break;
				}
			}

			/*
			 * If that did not happen, check if a previous inode
			 * did already create the dir.
			 */
			if (!ret)
				ret = did_create_dir(sctx, cur->dir);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_create_inode(sctx, cur->dir);
				if (ret < 0)
					goto out;
			}
		}

		/*
		 * Check if this new ref would overwrite the first ref of
		 * another unprocessed inode. If yes, orphanize the
		 * overwritten inode. If we find an overwritten ref that is
		 * not the first ref, simply unlink it.
		 */
		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					 cur->name, cur->name_len,
					 &ow_inode, &ow_gen);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = is_first_ref(sctx->parent_root,
					   ow_inode, cur->dir, cur->name,
					   cur->name_len);
			if (ret < 0)
				goto out;
			if (ret) {
				struct name_cache_entry *nce;

				ret = orphanize_inode(sctx, ow_inode, ow_gen,
						      cur->full_path);
				if (ret < 0)
					goto out;
				/*
				 * Make sure we clear our orphanized inode's
				 * name from the name cache. This is because the
				 * inode ow_inode might be an ancestor of some
				 * other inode that will be orphanized as well
				 * later and has an inode number greater than
				 * sctx->send_progress. We need to prevent
				 * future name lookups from using the old name
				 * and get instead the orphan name.
				 */
				nce = name_cache_search(sctx, ow_inode, ow_gen);
				if (nce) {
					name_cache_delete(sctx, nce);
					kfree(nce);
				}
			} else {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		/*
		 * link/move the ref to the new place. If we have an orphan
		 * inode, move it and update valid_path. If not, link or move
		 * it depending on the inode mode.
		 */
		if (is_orphan && can_rename) {
			ret = send_rename(sctx, valid_path, cur->full_path);
			if (ret < 0)
				goto out;
			is_orphan = 0;
			ret = fs_path_copy(valid_path, cur->full_path);
			if (ret < 0)
				goto out;
		} else if (can_rename) {
			if (S_ISDIR(sctx->cur_inode_mode)) {
				/*
				 * Dirs can't be linked, so move it. For moved
				 * dirs, we always have one new and one deleted
				 * ref. The deleted ref is ignored later.
				 */
				ret = wait_for_parent_move(sctx, cur);
				if (ret < 0)
					goto out;
				if (ret) {
					*pending_move = 1;
				} else {
					ret = send_rename(sctx, valid_path,
							  cur->full_path);
					if (!ret)
						ret = fs_path_copy(valid_path,
							       cur->full_path);
				}
				if (ret < 0)
					goto out;
			} else {
				ret = send_link(sctx, cur->full_path,
						valid_path);
				if (ret < 0)
					goto out;
			}
		}
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	}

	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
		/*
		 * Check if we can already rmdir the directory. If not,
		 * orphanize it. For every dir item inside that gets deleted
		 * later, we do this check again and rmdir it then if possible.
		 * See the use of check_dirs for more details.
		 */
		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = send_rmdir(sctx, valid_path);
			if (ret < 0)
				goto out;
		} else if (!is_orphan) {
			ret = orphanize_inode(sctx, sctx->cur_ino,
					      sctx->cur_inode_gen, valid_path);
			if (ret < 0)
				goto out;
			is_orphan = 1;
		}

		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
	} else if (S_ISDIR(sctx->cur_inode_mode) &&
		   !list_empty(&sctx->deleted_refs)) {
		/*
		 * We have a moved dir. Add the old parent to check_dirs
		 */
		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
				 list);
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		/*
		 * We have a non dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		 * inodes.
		 */
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
						sctx->cur_ino, sctx->cur_inode_gen,
						cur->name, cur->name_len);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
		/*
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode. Unlinking does not mean that the inode is deleted in
		 * all cases. There may still be links to this inode in other
		 * places.
		 */
		if (is_orphan) {
			ret = send_unlink(sctx, valid_path);
			if (ret < 0)
				goto out;
		}
	}

	/*
	 * We did collect all parent dirs where cur_inode was once located. We
	 * now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	 */
	list_for_each_entry(cur, &check_dirs, list) {
		/*
		 * In case we had refs into dirs that were not processed yet,
		 * we don't need to do the utime and rmdir logic for these dirs.
		 * The dir will be processed later.
		 */
		if (cur->dir > sctx->cur_ino)
			continue;

		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;

		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
			if (ret < 0)
				goto out;
		} else if (ret == inode_state_did_delete &&
			   cur->dir != last_dir_ino_rm) {
			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino);
			if (ret < 0)
				goto out;
			if (ret) {
				ret = get_cur_path(sctx, cur->dir,
						   cur->dir_gen, valid_path);
				if (ret < 0)
					goto out;
				ret = send_rmdir(sctx, valid_path);
				if (ret < 0)
					goto out;
				last_dir_ino_rm = cur->dir;
			}
		}
	}

	ret = 0;

out:
	__free_recorded_refs(&check_dirs);
	free_recorded_refs(sctx);
	fs_path_free(valid_path);
	return ret;
}
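
/*
 * Resolve the current path of a ref's parent directory, append the ref's
 * name to it and record the result on the given refs list.
 */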
static int record_ref(struct btrfs_root *root, int num, u64 dir, int index,
		      struct fs_path *name, void *ctx, struct list_head *refs)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;
	struct send_ctx *sctx = ctx;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
			     NULL, NULL);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, dir, gen, p);
	if (ret < 0)
		goto out;
	ret = fs_path_add_path(p, name);
	if (ret < 0)
		goto out;

	ret = __record_ref(refs, dir, gen, p);

out:
	if (ret)
		fs_path_free(p);
	return ret;
}
static int __record_new_ref(int num, u64 dir, int index,
			    struct fs_path *name,
			    void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->send_root, num, dir, index, name,
			  ctx, &sctx->new_refs);
}

static int __record_deleted_ref(int num, u64 dir, int index,
				struct fs_path *name,
				void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->parent_root, num, dir, index, name,
			  ctx, &sctx->deleted_refs);
}
static int record_new_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

static int record_deleted_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}
struct find_ref_ctx {
	u64 dir;
	u64 dir_gen;
	struct btrfs_root *root;
	struct fs_path *name;
	int found_idx;
};

static int __find_iref(int num, u64 dir, int index,
		       struct fs_path *name,
		       void *ctx_)
{
	struct find_ref_ctx *ctx = ctx_;
	u64 dir_gen;
	int ret;

	if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
	    strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
		/*
		 * To avoid doing extra lookups we'll only do this if everything
		 * else matches.
		 */
		ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret)
			return ret;
		if (dir_gen != ctx->dir_gen)
			return 0;
		ctx->found_idx = num;
		return 1;
	}
	return 0;
}
static int find_iref(struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *key,
		     u64 dir, u64 dir_gen, struct fs_path *name)
{
	int ret;
	struct find_ref_ctx ctx;

	ctx.dir = dir;
	ctx.name = name;
	ctx.dir_gen = dir_gen;
	ctx.found_idx = -1;
	ctx.root = root;

	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;

	return ctx.found_idx;
}
static int __record_changed_new_ref(int num, u64 dir, int index,
				    struct fs_path *name,
				    void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_new_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int __record_changed_deleted_ref(int num, u64 dir, int index,
					struct fs_path *name,
					void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
			dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_deleted_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}
static int record_changed_ref(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_changed_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}
/*
 * Record and process all refs at once. Needed when an inode changes the
 * generation number, which means that it was deleted and recreated.
 */
static int process_all_refs(struct send_ctx *sctx,
			    enum btrfs_compare_tree_result cmd)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;
	iterate_inode_ref_t cb;
	int pending_move = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	if (cmd == BTRFS_COMPARE_TREE_NEW) {
		root = sctx->send_root;
		cb = __record_new_ref;
	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
		root = sctx->parent_root;
		cb = __record_deleted_ref;
	} else {
		btrfs_err(sctx->send_root->fs_info,
				"Wrong command %d in process_all_refs", cmd);
		ret = -EINVAL;
		goto out;
	}

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    (found_key.type != BTRFS_INODE_REF_KEY &&
		     found_key.type != BTRFS_INODE_EXTREF_KEY))
			break;

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}
	btrfs_release_path(path);

	ret = process_recorded_refs(sctx, &pending_move);
	/* Only applicable to an incremental send. */
	ASSERT(pending_move == 0);

out:
	btrfs_free_path(path);
	return ret;
}
static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,
			       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	posix_acl_xattr_header dummy_acl;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	/*
	 * This hack is needed because empty acl's are stored as zero byte
	 * data in xattrs. Problem with that is, that receiving these zero byte
	 * acl's will fail later. To fix this, we send a dummy acl list that
	 * only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);
		}
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

out:
	fs_path_free(p);
	return ret;
}

static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
				   const char *name, int name_len,
				   const char *data, int data_len,
				   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_remove_xattr(sctx, p, name, name_len);

out:
	fs_path_free(p);
	return ret;
}
static int process_new_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       sctx->cmp_key, __process_new_xattr, sctx);

	return ret;
}

static int process_deleted_xattr(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			       sctx->cmp_key, __process_deleted_xattr, sctx);

	return ret;
}
struct find_xattr_ctx {
	const char *name;
	int name_len;
	int found_idx;
	char *found_data;
	int found_data_len;
};

static int __find_xattr(int num, struct btrfs_key *di_key,
			const char *name, int name_len,
			const char *data, int data_len,
			u8 type, void *vctx)
{
	struct find_xattr_ctx *ctx = vctx;

	if (name_len == ctx->name_len &&
	    strncmp(name, ctx->name, name_len) == 0) {
		ctx->found_idx = num;
		ctx->found_data_len = data_len;
		ctx->found_data = kmemdup(data, data_len, GFP_NOFS);
		if (!ctx->found_data)
			return -ENOMEM;
		return 1;
	}
	return 0;
}
*root
,
4214 struct btrfs_path
*path
,
4215 struct btrfs_key
*key
,
4216 const char *name
, int name_len
,
4217 char **data
, int *data_len
)
4220 struct find_xattr_ctx ctx
;
4223 ctx
.name_len
= name_len
;
4225 ctx
.found_data
= NULL
;
4226 ctx
.found_data_len
= 0;
4228 ret
= iterate_dir_item(root
, path
, key
, __find_xattr
, &ctx
);
4232 if (ctx
.found_idx
== -1)
4235 *data
= ctx
.found_data
;
4236 *data_len
= ctx
.found_data_len
;
4238 kfree(ctx
.found_data
);
4240 return ctx
.found_idx
;
static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
				       const char *name, int name_len,
				       const char *data, int data_len,
				       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	char *found_data = NULL;
	int found_data_len = 0;

	ret = find_xattr(sctx->parent_root, sctx->right_path,
			 sctx->cmp_key, name, name_len, &found_data,
			 &found_data_len);
	if (ret == -ENOENT) {
		ret = __process_new_xattr(num, di_key, name, name_len, data,
					  data_len, type, ctx);
	} else if (ret >= 0) {
		if (data_len != found_data_len ||
		    memcmp(data, found_data, data_len)) {
			ret = __process_new_xattr(num, di_key, name, name_len,
						  data, data_len, type, ctx);
		} else {
			ret = 0;
		}
	}

	kfree(found_data);
	return ret;
}

static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
					   const char *name, int name_len,
					   const char *data, int data_len,
					   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;

	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
			 name, name_len, NULL, NULL);
	if (ret == -ENOENT)
		ret = __process_deleted_xattr(num, di_key, name, name_len,
					      data, data_len, type, ctx);
	else if (ret >= 0)
		ret = 0;

	return ret;
}
static int process_changed_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       sctx->cmp_key, __process_changed_new_xattr, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			       sctx->cmp_key, __process_changed_deleted_xattr, sctx);

out:
	return ret;
}
static int process_all_new_xattrs(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = iterate_dir_item(root, path, &found_key,
				       __process_new_xattr, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
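
/*
 * Read up to len bytes at the given file offset from the inode currently
 * being processed into sctx->read_buf, going through the page cache with
 * readahead. Returns the number of bytes read or a negative error.
 */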
static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct page *page;
	char *addr;
	struct btrfs_key key;
	pgoff_t index = offset >> PAGE_CACHE_SHIFT;
	pgoff_t last_index;
	unsigned pg_offset = offset & ~PAGE_CACHE_MASK;
	ssize_t ret = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (offset + len > i_size_read(inode)) {
		if (offset > i_size_read(inode))
			len = 0;
		else
			len = i_size_read(inode) - offset;
	}
	if (len == 0)
		goto out;

	last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);
	btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
		       last_index - index + 1);

	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_CACHE_SIZE - pg_offset);
		page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
		if (!page) {
			ret = -ENOMEM;
			break;
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				page_cache_release(page);
				ret = -EIO;
				break;
			}
		}

		addr = kmap(page);
		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
		kunmap(page);
		unlock_page(page);
		page_cache_release(page);
		index++;
		pg_offset = 0;
		len -= cur_len;
		ret += cur_len;
	}
out:
	iput(inode);
	return ret;
}
/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;
	ssize_t num_read = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {
		if (num_read < 0)
			ret = num_read;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	if (ret < 0)
		return ret;
	return num_read;
}
/*
 * Send a clone command to user space.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;

	verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
		       "clone_inode=%llu, clone_offset=%llu\n", offset, len,
		       clone_root->root->objectid, clone_root->ino,
		       clone_root->offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				     &gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
		     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    le64_to_cpu(clone_root->root->root_item.ctransid));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
		    clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 offset = sctx->cur_inode_last_extent;
	u64 len;
	int ret = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
tlv_put_failure:
	fs_path_free(p);
	return ret;
}
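
/*
 * Send the data of one file extent item, either as a clone command (when a
 * suitable, block aligned clone source was found) or as a series of write
 * commands of at most BTRFS_SEND_READ_SIZE bytes each.
 */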
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	struct btrfs_file_extent_item *ei;
	u64 offset = key->offset;
	u64 pos = 0;
	u64 len;
	u32 l;
	u8 type;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_inline_len(path->nodes[0],
						   path->slots[0], ei);
		/*
		 * it is possible the inline item won't cover the whole page,
		 * but there may be items after this page. Make
		 * sure to send the whole thing
		 */
		len = PAGE_CACHE_ALIGN(len);
	} else {
		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
	}

	if (offset + len > sctx->cur_inode_size)
		len = sctx->cur_inode_size - offset;
	if (len == 0) {
		ret = 0;
		goto out;
	}

	if (clone_root && IS_ALIGNED(offset + len, bs)) {
		ret = send_clone(sctx, offset, len, clone_root);
	} else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) {
		ret = send_update_extent(sctx, offset, len);
	} else {
		while (pos < len) {
			l = len - pos;
			if (l > BTRFS_SEND_READ_SIZE)
				l = BTRFS_SEND_READ_SIZE;
			ret = send_write(sctx, pos + offset, l);
			if (ret < 0)
				goto out;
			if (!ret)
				break;
			pos += ret;
		}
		ret = 0;
	}
out:
	return ret;
}
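
/*
 * Returns 1 if the extent item at ekey in the send root covers exactly the
 * same data as the corresponding extents in the parent root, 0 if it
 * changed and needs to be sent, and < 0 on error.
 */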
static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u64 left_gen;
	u64 right_gen;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extents which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */

	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG) {
			ret = 0;
			goto out;
		}

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_len = btrfs_file_extent_num_bytes(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
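
/*
 * Find the end offset of the last extent at or before the given offset and
 * store it in sctx->cur_inode_last_extent. Used to detect holes between
 * consecutive file extent items.
 */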
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;
	u8 type;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	ret = 0;
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key.offset + size,
				   sctx->send_root->sectorsize);
	} else {
		extent_end = key.offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}
	sctx->cur_inode_last_extent = extent_end;
out:
	btrfs_free_path(path);
	return ret;
}
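
/*
 * If there is a gap between the end of the previously processed extent and
 * the start of the current one, send zero-filled writes to punch the hole
 * on the receiving side.
 */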
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 type;
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key->offset + size,
				   sctx->send_root->sectorsize);
	} else {
		extent_end = key->offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leafs that contained only
		 * file extent items for our current inode. These leafs have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leafs.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (sctx->cur_inode_last_extent < key->offset)
		ret = send_hole(sctx, key->offset);
	sctx->cur_inode_last_extent = extent_end;
	return ret;
}
static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
				sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}
static int process_all_extents(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
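
/*
 * Process the recorded refs of the current inode unless we are still in the
 * middle of its ref items (an incremental send delivers them one by one).
 */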
static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chmod = 0;
	int need_chown = 0;
	int pending_move = 0;
	int refs_processed = 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			     &left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
	} else {
		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
				     NULL, NULL, &right_mode, &right_uid,
				     &right_gid, NULL);
		if (ret < 0)
			goto out;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1 ||
			    sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				    sctx->cur_inode_size);
		if (ret < 0)
			goto out;
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				 left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				 left_mode);
		if (ret < 0)
			goto out;
	}

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
		/*
		 * Need to send that every time, no matter if it actually
		 * changed between the two trees as we have done changes to
		 * the inode before. If our inode is a directory and it's
		 * waiting to be moved/renamed, we will send its utimes when
		 * it's moved/renamed, therefore we don't need to do it here.
		 */
		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
					 sctx->left_path->slots[0],
					 struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
						  left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
					  sctx->right_path->slots[0],
					  struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
						   right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
					  sctx->right_path->slots[0],
					  struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
						   right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_rdev = btrfs_inode_rdev(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}
/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode did already initiate processing of refs. The reason for this is
 * that in this case, compare_tree actually compares the refs of 2 different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
 * refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}
/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					     sctx->cmp_key);
	}

	return ret;
}
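
/*
 * Returns 1 if the generation of the given directory differs between the
 * send root and the parent root, which means the directory was deleted and
 * recreated between the two snapshots.
 */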
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}
static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}
/*
 * Updates compare related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_root *left_root,
		      struct btrfs_root *right_root,
		      struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      void *ctx)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY)
		ret = changed_inode(sctx, result);
	else if (key->type == BTRFS_INODE_REF_KEY ||
		 key->type == BTRFS_INODE_EXTREF_KEY)
		ret = changed_ref(sctx, result);
	else if (key->type == BTRFS_XATTR_ITEM_KEY)
		ret = changed_xattr(sctx, result);
	else if (key->type == BTRFS_EXTENT_DATA_KEY)
		ret = changed_extent(sctx, result);

out:
	return ret;
}
static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);

		ret = changed_cb(send_root, NULL, path, NULL,
				 &found_key, BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		key.objectid = found_key.objectid;
		key.type = found_key.type;
		key.offset = found_key.offset + 1;

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}
static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
					  changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}
/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans, sctx->send_root);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans, sctx->send_root);
}

static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			"send_in_progress unbalanced %d root %llu",
			root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}
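
/*
 * Entry point of the BTRFS_IOC_SEND ioctl. A minimal userspace sketch of
 * how it is driven (illustrative only; see the btrfs_ioctl_send_args
 * definition in the uapi header for the authoritative layout, and note
 * that stream_fd, parent_id, ids and n are placeholder names):
 *
 *	struct btrfs_ioctl_send_args args = { 0 };
 *
 *	args.send_fd = stream_fd;	// the stream is written to this fd
 *	args.parent_root = parent_id;	// 0 for a full send
 *	args.clone_sources = ids;	// array of subvolume ids, may be NULL
 *	args.clone_sources_count = n;
 *	ret = ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 */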
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
{
	int ret = 0;
	struct btrfs_root *send_root;
	struct btrfs_root *clone_root;
	struct btrfs_fs_info *fs_info;
	struct btrfs_ioctl_send_args *arg = NULL;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	send_root = BTRFS_I(file_inode(mnt_file))->root;
	fs_info = send_root->fs_info;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we lookup the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	arg = memdup_user(arg_, sizeof(*arg));
	if (IS_ERR(arg)) {
		ret = PTR_ERR(arg);
		arg = NULL;
		goto out;
	}

	if (!access_ok(VERIFY_READ, arg->clone_sources,
			sizeof(*arg->clone_sources) *
			arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible, if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = vmalloc(sctx->send_max_size);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
			(arg->clone_sources_count + 1));
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	if (arg->clone_sources_count) {
		clone_sources_tmp = vmalloc(arg->clone_sources_count *
				sizeof(*arg->clone_sources));
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				arg->clone_sources_count *
				sizeof(*arg->clone_sources));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		vfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}
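
	/*
	 * At this point clone_sources_to_rollback counts exactly how many
	 * clone roots had send_in_progress bumped, so the cleanup path can
	 * drop precisely those references if we bail out before the array
	 * is sorted.
	 */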

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
				btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
			NULL);
	sort_clone_roots = 1;
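
	/*
	 * The clone root array is looked up with bsearch() while searching
	 * for clone sources (see __clone_root_cmp_bsearch elsewhere in this
	 * file), so it must stay sorted with a matching comparison;
	 * sort_clone_roots also tells the cleanup path that send_root has
	 * been appended to the array.
	 */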

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	if (ret < 0)
		goto out;
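
	/*
	 * BTRFS_SEND_TRANS_STUB was set around send_subvol() above to mark
	 * this task as running send; the stub lets the transaction code
	 * detect a transaction start attempted from send context (see the
	 * BTRFS_SEND_TRANS_STUB handling in transaction.c).
	 */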

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kfree(arg);
	vfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		vfree(sctx->clone_roots);
		vfree(sctx->send_buf);
		vfree(sctx->read_buf);

		name_cache_free(sctx);