/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "send.h"
#include "backref.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"
static int g_verbose = 0;

#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
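
/*
 * Editor's note: a small sketch of what the layout above buys us; the
 * snippet is illustrative, not kernel API. Because the union pads the
 * struct to 256 bytes, FS_PATH_INLINE_SIZE is 256 minus the offset of
 * inline_buf, so a freshly allocated fs_path can hold a typical path
 * without a second allocation:
 *
 *	struct fs_path *p = fs_path_alloc();
 *	// p->buf == p->inline_buf       -> true, no heap buffer yet
 *	// p->buf_len == FS_PATH_INLINE_SIZE
 *	// only paths longer than buf_len force fs_path_ensure_buf()
 *	// to switch p->buf to a kmalloc'ed buffer.
 */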
/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * info of the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	char *read_buf;
	/*
	 * We process inodes by their increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |
	 *         |
	 *         |-- c           (ino 259)
	 *         |   |-- d       (ino 260)
	 *         |
	 *         |-- c2          (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                   (ino 257)
	 *     |-- b               (ino 258)
	 *         |-- c2          (ino 261)
	 *             |-- d2      (ino 260)
	 *                 |-- cc  (ino 259)
	 *
	 * The sequence of steps that lead to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Sequence of steps that lead to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};
struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}
static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}
static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger, this will let the fast path
	 * happen most of the time
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}
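
/*
 * Editor's note: an illustrative userspace analogue of the growth policy
 * above (assumed names, not part of this file). Adopting the allocator's
 * real block size after reallocating is what makes the "p->buf_len >= len"
 * fast path hit most of the time:
 *
 *	char *grow(char *buf, size_t *cap, size_t want)
 *	{
 *		char *nbuf = realloc(buf, want);
 *		if (nbuf)
 *			*cap = malloc_usable_size(nbuf); // like ksize()
 *		return nbuf;
 *	}
 */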
static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}

static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}
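
/*
 * Editor's note: why reversed paths exist. get_cur_path() below walks from
 * an inode up to the subvolume root, so components arrive child first. A
 * reversed fs_path prepends each one in O(component) time (illustrative):
 *
 *	p = fs_path_alloc_reversed();
 *	fs_path_add(p, "file", 4);	// "file"
 *	fs_path_add(p, "dir", 3);	// "dir/file"
 *	fs_path_add(p, "a", 1);		// "a/dir/file"
 *	fs_path_unreverse(p);		// string now starts at p->buf
 */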
static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = vfs_write(filp, (__force const char __user *)buf + pos,
				len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EIO;
		pos += ret;
	}

	return 0;
}
static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}
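
/*
 * Editor's note: the attribute encoding above is a plain TLV sequence
 * appended to send_buf. With struct btrfs_tlv_header as defined in send.h,
 * each attribute is laid out as:
 *
 *	+-----------------+----------------+----------------+
 *	| tlv_type (le16) | tlv_len (le16) | data (tlv_len) |
 *	+-----------------+----------------+----------------+
 *
 * e.g. tlv_put_u64() emits a 4 byte header followed by an 8 byte
 * little-endian payload.
 */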
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}


#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
					&sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}
static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
					&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
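
/*
 * Editor's note: begin_cmd() and send_cmd() together frame every command
 * in the stream as a struct btrfs_cmd_header (len, cmd, crc; all
 * little-endian, as defined in send.h) followed by 'len' bytes of TLV
 * attributes. The crc32c covers the whole command with the crc field
 * zeroed first (hdr->crc = 0 above), so the receiver can verify it by
 * repeating the exact same computation.
 */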
/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		     struct fs_path *from, struct fs_path *to)
{
	int ret;

verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	int ret;

verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			  u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			  u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}


	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}
typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = vmalloc(buf_len);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
				data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}
static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	struct btrfs_path *path;
	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* data offset in the file extent item */
	u64 data_offset;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};
static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
		return -1;
	if (cr1->root->objectid > cr2->root->objectid)
		return 1;
	return 0;
}
/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL,
			       NULL, NULL, NULL, NULL);
	btrfs_release_path(bctx->path);
	if (ret < 0)
		return ret;

	if (offset + bctx->data_offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
#if 0
		if (ino > bctx->cur_objectid)
			return 0;
		if (offset + bctx->extent_len > bctx->cur_offset)
			return 0;
#endif
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}
/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means, that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
	if (!backref_ctx) {
		ret = -ENOMEM;
		goto out;
	}

	backref_ctx->path = tmp_path;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&sctx->send_root->fs_info->commit_root_sem);
	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&sctx->send_root->fs_info->commit_root_sem);
	btrfs_release_path(tmp_path);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;
	/*
	 * For non-compressed extents iterate_extent_inodes() gives us extent
	 * offsets that already take into account the data offset, but not for
	 * compressed extents, since the offset is logical and not relative to
	 * the physical extent locations. We must take this into account to
	 * avoid sending clone offsets that go beyond the source file's size,
	 * which would result in the clone ioctl failing with -EINVAL on the
	 * receiving end.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		backref_ctx->data_offset = 0;
	else
		backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(sctx->send_root->fs_info,
					found_key.objectid, extent_item_pos, 1,
					__iterate_backrefs, backref_ctx);

	if (ret < 0)
		goto out;

	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(sctx->send_root->fs_info, "did not find backref in "
				"send_root. inode=%llu, offset=%llu, "
				"disk_byte=%llu found extent=%llu",
				ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
		"ino=%llu, num_bytes=%llu, logical=%llu\n",
		data_offset, ino, num_bytes, logical);

	if (!backref_ctx->found)
		verbose_printk("btrfs: no clones found\n");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}
	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	kfree(backref_ctx);
	return ret;
}
*root
,
1451 struct fs_path
*dest
)
1454 struct btrfs_path
*path
;
1455 struct btrfs_key key
;
1456 struct btrfs_file_extent_item
*ei
;
1462 path
= alloc_path_for_send();
1467 key
.type
= BTRFS_EXTENT_DATA_KEY
;
1469 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
1474 * An empty symlink inode. Can happen in rare error paths when
1475 * creating a symlink (transaction committed before the inode
1476 * eviction handler removed the symlink inode items and a crash
1477 * happened in between or the subvol was snapshoted in between).
1478 * Print an informative message to dmesg/syslog so that the user
1479 * can delete the symlink.
1481 btrfs_err(root
->fs_info
,
1482 "Found empty symlink inode %llu at root %llu",
1483 ino
, root
->root_key
.objectid
);
1488 ei
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1489 struct btrfs_file_extent_item
);
1490 type
= btrfs_file_extent_type(path
->nodes
[0], ei
);
1491 compression
= btrfs_file_extent_compression(path
->nodes
[0], ei
);
1492 BUG_ON(type
!= BTRFS_FILE_EXTENT_INLINE
);
1493 BUG_ON(compression
);
1495 off
= btrfs_file_extent_inline_start(ei
);
1496 len
= btrfs_file_extent_inline_len(path
->nodes
[0], path
->slots
[0], ei
);
1498 ret
= fs_path_add_from_extent_buffer(dest
, path
->nodes
[0], off
, len
);
1501 btrfs_free_path(path
);
/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
				ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}
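
/*
 * Editor's note: a concrete example of the naming scheme above. For inode
 * 261 with generation 7 the candidates tried are "o261-7-0", "o261-7-1",
 * ... until one exists in neither send_root nor parent_root, so an orphan
 * name can never collide with a real directory entry in either snapshot.
 */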
enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}
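
/*
 * Editor's note: the decision above summarized as a table. "left" is
 * send_root, "right" is parent_root, and a side counts as a hit when the
 * inode exists there with a matching generation:
 *
 *	left  right  result
 *	yes   yes    inode_state_no_change
 *	yes   no     did_create / will_create
 *	no    yes    did_delete / will_delete
 *	no    no     -ENOENT
 *
 * did_* is picked when ino < sctx->send_progress, will_* otherwise.
 */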
static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	if (!di) {
		ret = -ENOENT;
		goto out;
	}
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
 * generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}
/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress ||
	    is_waiting_for_move(sctx, other_inode)) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			    u64 dir, u64 dir_gen,
			    u64 ino, u64 ino_gen,
			    const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that caused
	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
	 * the current inode being processed.
	 */
	if ((ow_inode < sctx->send_progress) ||
	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
	     gen == sctx->cur_inode_gen))
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}
/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
 * that got overwritten. This is used by process_recorded_refs to determine
 * if it has to use the path as returned by get_cur_path or the orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}
/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}
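
/*
 * Editor's note: the collision handling above, spelled out. The radix tree
 * index is an 'unsigned long' (32 bits on 32bit kernels), so one index can
 * host several 64bit inums; each slot therefore holds a list head and
 * name_cache_search() below filters by the full ino/gen pair:
 *
 *	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
 *	list_for_each_entry(cur, nce_head, radix_list)
 *		if (cur->ino == ino && cur->gen == gen)
 *			return cur;
 */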
static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}

static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}

/*
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
}

/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}
/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode is not existent yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * record_new_ref
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	return ret;
}
/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inodes "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    it.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;

	name = fs_path_alloc();
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		struct waiting_dir_move *wdm;

		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino)) {
			ret = gen_unique_name(sctx, ino, gen, name);
			if (ret < 0)
				goto out;
			ret = fs_path_add_path(dest, name);
			break;
		}

		wdm = get_waiting_dir_move(sctx, ino);
		if (wdm && wdm->orphanized) {
			ret = gen_unique_name(sctx, ino, gen, name);
			stop = 1;
		} else if (wdm) {
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret)
				stop = 1;
		}

		if (ret < 0)
			goto out;

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}
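
/*
 * Editor's note: a worked example of the walk above, assuming only inode
 * 259 ("c") still waits for its move. Building the path of /a/b/c/file
 * into a reversed fs_path proceeds:
 *
 *	ino(file) -> "file"		dest: "file"
 *	ino(259)  -> orphan or "c"	dest: "c/file"
 *	ino(258)  -> "b"		dest: "b/c/file"
 *	ino(257)  -> "a"		dest: "a/b/c/file"
 *
 * and the loop stops at BTRFS_FIRST_FREE_OBJECTID, the subvolume root.
 */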
/*
 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
 */
static int send_subvol_begin(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_root *parent_root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	char *name = NULL;
	int namelen;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL);
	if (!name) {
		btrfs_free_path(path);
		return -ENOMEM;
	}

	key.objectid = send_root->objectid;
	key.type = BTRFS_ROOT_BACKREF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root,
				&key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.type != BTRFS_ROOT_BACKREF_KEY ||
	    key.objectid != send_root->objectid) {
		ret = -ENOENT;
		goto out;
	}
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	namelen = btrfs_root_ref_name_len(leaf, ref);
	read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen);
	btrfs_release_path(path);

	if (parent_root) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT);
		if (ret < 0)
			goto out;
	} else {
		ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL);
		if (ret < 0)
			goto out;
	}

	TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen);

	if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			    sctx->send_root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID,
			    sctx->send_root->root_item.uuid);

	TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID,
		    le64_to_cpu(sctx->send_root->root_item.ctransid));
	if (parent_root) {
		if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid))
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.received_uuid);
		else
			TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
				     parent_root->root_item.uuid);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
			    le64_to_cpu(sctx->parent_root->root_item.ctransid));
	}

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	btrfs_free_path(path);
	kfree(name);
	return ret;
}
static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid)
{
	int ret = 0;
	struct fs_path *p;

verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *p = NULL;
	struct btrfs_inode_item *ii;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_key key;
	int slot;

verbose_printk("btrfs: send_utimes %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	slot = path->slots[0];
	ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, ino, gen, p);
	if (ret < 0)
		goto out;
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
	TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
	/* TODO Add otime support when the otime patches get into upstream */

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	btrfs_free_path(path);
	return ret;
}
/*
 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
 * a valid path yet because we did not process the refs yet. So, the inode
 * is created as orphan.
 */
static int send_create_inode(struct send_ctx *sctx, u64 ino)
{
	int ret = 0;
	struct fs_path *p;
	int cmd;
	u64 gen;
	u64 mode;
	u64 rdev;

verbose_printk("btrfs: send_create_inode %llu\n", ino);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	if (ino != sctx->cur_ino) {
		ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode,
				     NULL, NULL, &rdev);
		if (ret < 0)
			goto out;
	} else {
		gen = sctx->cur_inode_gen;
		mode = sctx->cur_inode_mode;
		rdev = sctx->cur_inode_rdev;
	}

	if (S_ISREG(mode)) {
		cmd = BTRFS_SEND_C_MKFILE;
	} else if (S_ISDIR(mode)) {
		cmd = BTRFS_SEND_C_MKDIR;
	} else if (S_ISLNK(mode)) {
		cmd = BTRFS_SEND_C_SYMLINK;
	} else if (S_ISCHR(mode) || S_ISBLK(mode)) {
		cmd = BTRFS_SEND_C_MKNOD;
	} else if (S_ISFIFO(mode)) {
		cmd = BTRFS_SEND_C_MKFIFO;
	} else if (S_ISSOCK(mode)) {
		cmd = BTRFS_SEND_C_MKSOCK;
	} else {
		btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o",
				(int)(mode & S_IFMT));
		ret = -ENOTSUPP;
		goto out;
	}

	ret = begin_cmd(sctx, cmd);
	if (ret < 0)
		goto out;

	ret = gen_unique_name(sctx, ino, gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino);

	if (S_ISLNK(mode)) {
		fs_path_reset(p);
		ret = read_symlink(sctx->send_root, ino, p);
		if (ret < 0)
			goto out;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
	} else if (S_ISCHR(mode) || S_ISBLK(mode) ||
		   S_ISFIFO(mode) || S_ISSOCK(mode)) {
		TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
		TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
	}

	ret = send_cmd(sctx);
	if (ret < 0)
		goto out;

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
/*
 * We need some special handling for inodes that get processed before the parent
 * directory got created. See process_recorded_refs for details.
 * This function does the check if we already created the dir out of order.
 */
static int did_create_dir(struct send_ctx *sctx, u64 dir)
{
	int ret = 0;
	struct btrfs_path *path = NULL;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key di_key;
	struct extent_buffer *eb;
	struct btrfs_dir_item *di;
	int slot;

	path = alloc_path_for_send();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(sctx->send_root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (di_key.type != BTRFS_ROOT_ITEM_KEY &&
		    di_key.objectid < sctx->send_progress) {
			ret = 1;
			goto out;
		}

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Only creates the inode if it is:
 * 1. Not a directory
 * 2. Or a directory which was not created already due to out of order
 *    directories. See did_create_dir and process_recorded_refs for details.
 */
static int send_create_inode_if_needed(struct send_ctx *sctx)
{
	int ret;

	if (S_ISDIR(sctx->cur_inode_mode)) {
		ret = did_create_dir(sctx, sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
	}

	ret = send_create_inode(sctx, sctx->cur_ino);
	if (ret < 0)
		goto out;

out:
	return ret;
}
{
2714 struct list_head list
;
2717 struct fs_path
*full_path
;
2725 * We need to process new refs before deleted refs, but compare_tree gives us
2726 * everything mixed. So we first record all refs and later process them.
2727 * This function is a helper to record one ref.
2729 static int __record_ref(struct list_head
*head
, u64 dir
,
2730 u64 dir_gen
, struct fs_path
*path
)
2732 struct recorded_ref
*ref
;
2734 ref
= kmalloc(sizeof(*ref
), GFP_KERNEL
);
2739 ref
->dir_gen
= dir_gen
;
2740 ref
->full_path
= path
;
2742 ref
->name
= (char *)kbasename(ref
->full_path
->start
);
2743 ref
->name_len
= ref
->full_path
->end
- ref
->name
;
2744 ref
->dir_path
= ref
->full_path
->start
;
2745 if (ref
->name
== ref
->full_path
->start
)
2746 ref
->dir_path_len
= 0;
2748 ref
->dir_path_len
= ref
->full_path
->end
-
2749 ref
->full_path
->start
- 1 - ref
->name_len
;
2751 list_add_tail(&ref
->list
, head
);
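
/*
 * Editor's note: how the fields above decompose a path, e.g. for a
 * full_path of "a/b/file" (illustrative values):
 *
 *	name         -> "file"	(kbasename of full_path)
 *	name_len     -> 4
 *	dir_path     -> "a/b/file" (shares full_path's buffer)
 *	dir_path_len -> 3	("a/b", excluding the '/' and the name)
 *
 * For a top-level entry like "file", name == full_path->start and
 * dir_path_len is 0.
 */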
static int dup_ref(struct recorded_ref *ref, struct list_head *list)
{
	struct recorded_ref *new;

	new = kmalloc(sizeof(*ref), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new->dir = ref->dir;
	new->dir_gen = ref->dir_gen;
	new->full_path = NULL;
	INIT_LIST_HEAD(&new->list);
	list_add_tail(&new->list, list);
	return 0;
}

static void __free_recorded_refs(struct list_head *head)
{
	struct recorded_ref *cur;

	while (!list_empty(head)) {
		cur = list_entry(head->next, struct recorded_ref, list);
		fs_path_free(cur->full_path);
		list_del(&cur->list);
		kfree(cur);
	}
}

static void free_recorded_refs(struct send_ctx *sctx)
{
	__free_recorded_refs(&sctx->new_refs);
	__free_recorded_refs(&sctx->deleted_refs);
}
2790 * Renames/moves a file/dir to its orphan name. Used when the first
2791 * ref of an unprocessed inode gets overwritten and for all non empty
2794 static int orphanize_inode(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2795 struct fs_path
*path
)
2798 struct fs_path
*orphan
;
2800 orphan
= fs_path_alloc();
2804 ret
= gen_unique_name(sctx
, ino
, gen
, orphan
);
2808 ret
= send_rename(sctx
, path
, orphan
);
2811 fs_path_free(orphan
);
2815 static struct orphan_dir_info
*
2816 add_orphan_dir_info(struct send_ctx
*sctx
, u64 dir_ino
)
2818 struct rb_node
**p
= &sctx
->orphan_dirs
.rb_node
;
2819 struct rb_node
*parent
= NULL
;
2820 struct orphan_dir_info
*entry
, *odi
;
2822 odi
= kmalloc(sizeof(*odi
), GFP_KERNEL
);
2824 return ERR_PTR(-ENOMEM
);
2830 entry
= rb_entry(parent
, struct orphan_dir_info
, node
);
2831 if (dir_ino
< entry
->ino
) {
2833 } else if (dir_ino
> entry
->ino
) {
2834 p
= &(*p
)->rb_right
;
2841 rb_link_node(&odi
->node
, parent
, p
);
2842 rb_insert_color(&odi
->node
, &sctx
->orphan_dirs
);
2846 static struct orphan_dir_info
*
2847 get_orphan_dir_info(struct send_ctx
*sctx
, u64 dir_ino
)
2849 struct rb_node
*n
= sctx
->orphan_dirs
.rb_node
;
2850 struct orphan_dir_info
*entry
;
2853 entry
= rb_entry(n
, struct orphan_dir_info
, node
);
2854 if (dir_ino
< entry
->ino
)
2856 else if (dir_ino
> entry
->ino
)
2864 static int is_waiting_for_rm(struct send_ctx
*sctx
, u64 dir_ino
)
2866 struct orphan_dir_info
*odi
= get_orphan_dir_info(sctx
, dir_ino
);
static void free_orphan_dir_info(struct send_ctx *sctx,
				 struct orphan_dir_info *odi)
{
	if (!odi)
		return;
	rb_erase(&odi->node, &sctx->orphan_dirs);
	kfree(odi);
}
/*
 * Returns 1 if a directory can be removed at this point in time.
 * We check this by iterating all dir items and checking if the inode behind
 * the dir item was already processed.
 */
static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
		     u64 send_progress)
{
	int ret = 0;
	struct btrfs_root *root = sctx->parent_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key loc;
	struct btrfs_dir_item *di;

	/*
	 * Don't try to rmdir the top/root subvolume dir.
	 */
	if (dir == BTRFS_FIRST_FREE_OBJECTID)
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = dir;
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		struct waiting_dir_move *dm;

		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type)
			break;

		di = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_dir_item);
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc);

		dm = get_waiting_dir_move(sctx, loc.objectid);
		if (dm) {
			struct orphan_dir_info *odi;

			odi = add_orphan_dir_info(sctx, dir);
			if (IS_ERR(odi)) {
				ret = PTR_ERR(odi);
				goto out;
			}
			odi->gen = dir_gen;
			dm->rmdir_ino = dir;
			ret = 0;
			goto out;
		}

		if (loc.objectid > send_progress) {
			ret = 0;
			goto out;
		}

		path->slots[0]++;
	}

	ret = 1;

out:
	btrfs_free_path(path);
	return ret;
}
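
/*
 * Illustrative scenario (assumed, not from the original source): while
 * processing deleted dir 260 with send_progress at 265, a dir item still
 * points at inode 270. Since 270 > send_progress that entry is not yet gone
 * on the receiver, so can_rmdir() returns 0 and the caller orphanizes the
 * directory; the rmdir is retried once inode 270 has been processed.
 */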
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino)
{
	struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino);

	return entry != NULL;
}
static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
{
	struct rb_node **p = &sctx->waiting_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct waiting_dir_move *entry, *dm;

	dm = kmalloc(sizeof(*dm), GFP_KERNEL);
	if (!dm)
		return -ENOMEM;
	dm->ino = ino;
	dm->rmdir_ino = 0;
	dm->orphanized = orphanized;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct waiting_dir_move, node);
		if (ino < entry->ino) {
			p = &(*p)->rb_left;
		} else if (ino > entry->ino) {
			p = &(*p)->rb_right;
		} else {
			kfree(dm);
			return -EEXIST;
		}
	}

	rb_link_node(&dm->node, parent, p);
	rb_insert_color(&dm->node, &sctx->waiting_dir_moves);
	return 0;
}
static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
	struct rb_node *n = sctx->waiting_dir_moves.rb_node;
	struct waiting_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct waiting_dir_move, node);
		if (ino < entry->ino)
			n = n->rb_left;
		else if (ino > entry->ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

static void free_waiting_dir_move(struct send_ctx *sctx,
				  struct waiting_dir_move *dm)
{
	if (!dm)
		return;
	rb_erase(&dm->node, &sctx->waiting_dir_moves);
	kfree(dm);
}
static int add_pending_dir_move(struct send_ctx *sctx,
				u64 ino,
				u64 ino_gen,
				u64 parent_ino,
				struct list_head *new_refs,
				struct list_head *deleted_refs,
				const bool is_orphan)
{
	struct rb_node **p = &sctx->pending_dir_moves.rb_node;
	struct rb_node *parent = NULL;
	struct pending_dir_move *entry = NULL, *pm;
	struct recorded_ref *cur;
	int exists = 0;
	int ret;

	pm = kmalloc(sizeof(*pm), GFP_KERNEL);
	if (!pm)
		return -ENOMEM;
	pm->parent_ino = parent_ino;
	pm->ino = ino;
	pm->gen = ino_gen;
	INIT_LIST_HEAD(&pm->list);
	INIT_LIST_HEAD(&pm->update_refs);
	RB_CLEAR_NODE(&pm->node);

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino) {
			p = &(*p)->rb_left;
		} else if (parent_ino > entry->parent_ino) {
			p = &(*p)->rb_right;
		} else {
			exists = 1;
			break;
		}
	}

	list_for_each_entry(cur, deleted_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}
	list_for_each_entry(cur, new_refs, list) {
		ret = dup_ref(cur, &pm->update_refs);
		if (ret < 0)
			goto out;
	}

	ret = add_waiting_dir_move(sctx, pm->ino, is_orphan);
	if (ret)
		goto out;

	if (exists) {
		list_add_tail(&pm->list, &entry->list);
	} else {
		rb_link_node(&pm->node, parent, p);
		rb_insert_color(&pm->node, &sctx->pending_dir_moves);
	}
	ret = 0;
out:
	if (ret) {
		__free_recorded_refs(&pm->update_refs);
		kfree(pm);
	}
	return ret;
}
static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
						      u64 parent_ino)
{
	struct rb_node *n = sctx->pending_dir_moves.rb_node;
	struct pending_dir_move *entry;

	while (n) {
		entry = rb_entry(n, struct pending_dir_move, node);
		if (parent_ino < entry->parent_ino)
			n = n->rb_left;
		else if (parent_ino > entry->parent_ino)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
static int path_loop(struct send_ctx *sctx, struct fs_path *name,
		     u64 ino, u64 gen, u64 *ancestor_ino)
{
	int ret = 0;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	u64 start_ino = ino;

	*ancestor_ino = 0;
	while (ino != BTRFS_FIRST_FREE_OBJECTID) {
		fs_path_reset(name);

		if (is_waiting_for_rm(sctx, ino))
			break;
		if (is_waiting_for_move(sctx, ino)) {
			if (*ancestor_ino == 0)
				*ancestor_ino = ino;
			ret = get_first_ref(sctx->parent_root, ino,
					    &parent_inode, &parent_gen, name);
		} else {
			ret = __get_cur_name_and_parent(sctx, ino, gen,
							&parent_inode,
							&parent_gen, name);
			if (ret > 0) {
				ret = 0;
				break;
			}
		}
		if (ret < 0)
			break;
		if (parent_inode == start_ino) {
			ret = 1;
			if (*ancestor_ino == 0)
				*ancestor_ino = ino;
			break;
		}
		ino = parent_inode;
		gen = parent_gen;
	}
	return ret;
}
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
	struct fs_path *from_path = NULL;
	struct fs_path *to_path = NULL;
	struct fs_path *name = NULL;
	u64 orig_progress = sctx->send_progress;
	struct recorded_ref *cur;
	u64 parent_ino, parent_gen;
	struct waiting_dir_move *dm = NULL;
	u64 rmdir_ino = 0;
	u64 ancestor;
	bool is_orphan;
	int ret;

	name = fs_path_alloc();
	from_path = fs_path_alloc();
	if (!name || !from_path) {
		ret = -ENOMEM;
		goto out;
	}

	dm = get_waiting_dir_move(sctx, pm->ino);
	ASSERT(dm);
	rmdir_ino = dm->rmdir_ino;
	is_orphan = dm->orphanized;
	free_waiting_dir_move(sctx, dm);

	if (is_orphan) {
		ret = gen_unique_name(sctx, pm->ino,
				      pm->gen, from_path);
	} else {
		ret = get_first_ref(sctx->parent_root, pm->ino,
				    &parent_ino, &parent_gen, name);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, parent_ino, parent_gen,
				   from_path);
		if (ret < 0)
			goto out;
		ret = fs_path_add_path(from_path, name);
	}
	if (ret < 0)
		goto out;

	sctx->send_progress = sctx->cur_ino + 1;
	ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
	if (ret < 0)
		goto out;
	if (ret) {
		LIST_HEAD(deleted_refs);

		ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
		ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
					   &pm->update_refs, &deleted_refs,
					   is_orphan);
		if (ret < 0)
			goto out;
		if (rmdir_ino) {
			dm = get_waiting_dir_move(sctx, pm->ino);
			ASSERT(dm);
			dm->rmdir_ino = rmdir_ino;
		}
		goto out;
	}
	fs_path_reset(name);
	to_path = name;
	name = NULL;
	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, from_path, to_path);
	if (ret < 0)
		goto out;

	if (rmdir_ino) {
		struct orphan_dir_info *odi;

		odi = get_orphan_dir_info(sctx, rmdir_ino);
		if (!odi) {
			/* already deleted */
			goto finish;
		}
		ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
		if (ret < 0)
			goto out;
		if (!ret)
			goto finish;

		name = fs_path_alloc();
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		ret = get_cur_path(sctx, rmdir_ino, odi->gen, name);
		if (ret < 0)
			goto out;
		ret = send_rmdir(sctx, name);
		if (ret < 0)
			goto out;
		free_orphan_dir_info(sctx, odi);
	}

finish:
	ret = send_utimes(sctx, pm->ino, pm->gen);
	if (ret < 0)
		goto out;

	/*
	 * After rename/move, need to update the utimes of both new parent(s)
	 * and old parent(s).
	 */
	list_for_each_entry(cur, &pm->update_refs, list) {
		if (cur->dir == rmdir_ino)
			continue;
		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
	}

out:
	fs_path_free(name);
	fs_path_free(from_path);
	fs_path_free(to_path);
	sctx->send_progress = orig_progress;

	return ret;
}
static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
{
	if (!list_empty(&m->list))
		list_del(&m->list);
	if (!RB_EMPTY_NODE(&m->node))
		rb_erase(&m->node, &sctx->pending_dir_moves);
	__free_recorded_refs(&m->update_refs);
	kfree(m);
}

static void tail_append_pending_moves(struct pending_dir_move *moves,
				      struct list_head *stack)
{
	if (list_empty(&moves->list)) {
		list_add_tail(&moves->list, stack);
	} else {
		LIST_HEAD(list);

		list_splice_init(&moves->list, &list);
		list_add_tail(&moves->list, stack);
		list_splice_tail(&list, stack);
	}
}
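
/*
 * Note (illustrative, not from the original source): get_pending_dir_moves()
 * returns the rb-tree entry for a parent inode, with any further moves for
 * the same parent chained on entry->list. tail_append_pending_moves() pushes
 * the entry plus that whole chain to the tail of the stack, entry first, so
 * the loop in apply_children_dir_moves() below drains moves in FIFO order.
 */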
static int apply_children_dir_moves(struct send_ctx *sctx)
{
	struct pending_dir_move *pm;
	struct list_head stack;
	u64 parent_ino = sctx->cur_ino;
	int ret = 0;

	pm = get_pending_dir_moves(sctx, parent_ino);
	if (!pm)
		return 0;

	INIT_LIST_HEAD(&stack);
	tail_append_pending_moves(pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		parent_ino = pm->ino;
		ret = apply_dir_move(sctx, pm);
		free_pending_move(sctx, pm);
		if (ret)
			goto out;
		pm = get_pending_dir_moves(sctx, parent_ino);
		if (pm)
			tail_append_pending_moves(pm, &stack);
	}
	return 0;

out:
	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		free_pending_move(sctx, pm);
	}
	return ret;
}
/*
 * We might need to delay a directory rename even when no ancestor directory
 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was
 * renamed. This happens when we rename a directory to the old name (the name
 * in the parent root) of some other unrelated directory that got its rename
 * delayed due to some ancestor with higher number that got renamed.
 *
 * Example:
 *
 * Parent snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 257)
 * |     |---- file                        (ino 260)
 * |
 * |---- b/                                (ino 258)
 * |---- c/                                (ino 259)
 *
 * Send snapshot:
 * .                                       (ino 256)
 * |---- a/                                (ino 258)
 * |---- x/                                (ino 259)
 *       |---- y/                          (ino 257)
 *             |----- file                 (ino 260)
 *
 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
 * from 'a' to 'x/y' happening first, which in turn depends on the rename of
 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream
 * must issue is:
 *
 * 1 - rename 259 from 'c' to 'x'
 * 2 - rename 257 from 'a' to 'x/y'
 * 3 - rename 258 from 'b' to 'a'
 *
 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
 * be done right away and < 0 on error.
 */
static int wait_for_dest_dir_move(struct send_ctx *sctx,
				  struct recorded_ref *parent_ref,
				  const bool is_orphan)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key di_key;
	struct btrfs_dir_item *di;
	u64 left_gen;
	u64 right_gen;
	int ret = 0;
	struct waiting_dir_move *wdm;

	if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
		return 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = parent_ref->dir;
	key.type = BTRFS_DIR_ITEM_KEY;
	key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);

	ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		ret = 0;
		goto out;
	}

	di = btrfs_match_dir_item_name(sctx->parent_root, path,
				       parent_ref->name, parent_ref->name_len);
	if (!di) {
		ret = 0;
		goto out;
	}
	/*
	 * di_key.objectid has the number of the inode that has a dentry in the
	 * parent directory with the same name that sctx->cur_ino is being
	 * renamed to. We need to check if that inode is in the send root as
	 * well and if it is currently marked as an inode with a pending rename,
	 * if it is, we need to delay the rename of sctx->cur_ino as well, so
	 * that it happens after that other inode is renamed.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
	if (di_key.type != BTRFS_INODE_ITEM_KEY) {
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
			     &left_gen, NULL, NULL, NULL, NULL);
	if (ret < 0)
		goto out;
	ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
			     &right_gen, NULL, NULL, NULL, NULL);
	if (ret < 0) {
		if (ret == -ENOENT)
			ret = 0;
		goto out;
	}

	/* Different inode, no need to delay the rename of sctx->cur_ino */
	if (right_gen != left_gen) {
		ret = 0;
		goto out;
	}

	wdm = get_waiting_dir_move(sctx, di_key.objectid);
	if (wdm && !wdm->orphanized) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   di_key.objectid,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * Check if inode ino1 is an ancestor of inode ino2 in the given root.
 * Return 1 if true, 0 if false and < 0 on error.
 */
static int is_ancestor(struct btrfs_root *root,
		       const u64 ino1,
		       const u64 ino1_gen,
		       const u64 ino2,
		       struct fs_path *fs_path)
{
	u64 ino = ino2;

	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		u64 parent;
		u64 parent_gen;
		int ret;

		fs_path_reset(fs_path);
		ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
		if (ret < 0) {
			if (ret == -ENOENT && ino == ino2)
				ret = 0;
			return ret;
		}
		if (parent == ino1)
			return parent_gen == ino1_gen ? 1 : 0;
		ino = parent;
	}
	return 0;
}
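
/*
 * Worked example (illustrative, not from the original source): with a/
 * (ino 257) containing b/ (ino 258) containing file c (ino 259),
 * is_ancestor(root, 257, gen_257, 259, path) walks 259 -> 258 -> 257
 * through get_first_ref() and returns 1 as soon as the parent chain reaches
 * ino1 with the expected generation.
 */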
static int wait_for_parent_move(struct send_ctx *sctx,
				struct recorded_ref *parent_ref,
				const bool is_orphan)
{
	int ret = 0;
	u64 ino = parent_ref->dir;
	u64 parent_ino_before, parent_ino_after;
	struct fs_path *path_before = NULL;
	struct fs_path *path_after = NULL;
	int len1, len2;

	path_after = fs_path_alloc();
	path_before = fs_path_alloc();
	if (!path_after || !path_before) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Our current directory inode may not yet be renamed/moved because some
	 * ancestor (immediate or not) has to be renamed/moved first. So find if
	 * such ancestor exists and make sure our own rename/move happens after
	 * that ancestor is processed to avoid path build infinite loops (done
	 * at get_cur_path()).
	 */
	while (ino > BTRFS_FIRST_FREE_OBJECTID) {
		if (is_waiting_for_move(sctx, ino)) {
			/*
			 * If the current inode is an ancestor of ino in the
			 * parent root, we need to delay the rename of the
			 * current inode, otherwise don't delay the rename
			 * because we can end up with a circular dependency
			 * of renames, resulting in some directories never
			 * getting the respective rename operations issued in
			 * the send stream or getting into infinite path build
			 * loops.
			 */
			ret = is_ancestor(sctx->parent_root,
					  sctx->cur_ino, sctx->cur_inode_gen,
					  ino, path_before);
			break;
		}

		fs_path_reset(path_before);
		fs_path_reset(path_after);

		ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
				    NULL, path_after);
		if (ret < 0)
			goto out;
		ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
				    NULL, path_before);
		if (ret < 0 && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			ret = 0;
			break;
		}

		len1 = fs_path_len(path_before);
		len2 = fs_path_len(path_after);
		if (ino > sctx->cur_ino &&
		    (parent_ino_before != parent_ino_after || len1 != len2 ||
		     memcmp(path_before->start, path_after->start, len1))) {
			ret = 1;
			break;
		}

		ino = parent_ino_after;
	}

out:
	fs_path_free(path_before);
	fs_path_free(path_after);

	if (ret == 1) {
		ret = add_pending_dir_move(sctx,
					   sctx->cur_ino,
					   sctx->cur_inode_gen,
					   ino,
					   &sctx->new_refs,
					   &sctx->deleted_refs,
					   is_orphan);
		if (!ret)
			ret = 1;
	}

	return ret;
}
/*
 * This does all the move/link/unlink/rmdir magic.
 */
static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
{
	int ret = 0;
	struct recorded_ref *cur;
	struct recorded_ref *cur2;
	struct list_head check_dirs;
	struct fs_path *valid_path = NULL;
	u64 ow_inode = 0;
	u64 ow_gen;
	int did_overwrite = 0;
	int is_orphan = 0;
	u64 last_dir_ino_rm = 0;
	bool can_rename = true;

	verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);

	/*
	 * This should never happen as the root dir always has the same ref
	 * which is always '..'
	 */
	BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID);
	INIT_LIST_HEAD(&check_dirs);

	valid_path = fs_path_alloc();
	if (!valid_path) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would like while receiving at
	 * this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
	if (!sctx->cur_inode_new) {
		ret = did_overwrite_first_ref(sctx, sctx->cur_ino,
				sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
		if (ret)
			did_overwrite = 1;
	}
	if (sctx->cur_inode_new || did_overwrite) {
		ret = gen_unique_name(sctx, sctx->cur_ino,
				sctx->cur_inode_gen, valid_path);
		if (ret < 0)
			goto out;
		is_orphan = 1;
	} else {
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				valid_path);
		if (ret < 0)
			goto out;
	}

	list_for_each_entry(cur, &sctx->new_refs, list) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directories inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		 */
		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
		if (ret == inode_state_will_create) {
			ret = 0;
			/*
			 * First check if any of the current inodes refs did
			 * already create the dir.
			 */
			list_for_each_entry(cur2, &sctx->new_refs, list) {
				if (cur == cur2)
					break;
				if (cur2->dir == cur->dir) {
					ret = 1;
					break;
				}
			}

			/*
			 * If that did not happen, check if a previous inode
			 * did already create the dir.
			 */
			if (!ret)
				ret = did_create_dir(sctx, cur->dir);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_create_inode(sctx, cur->dir);
				if (ret < 0)
					goto out;
			}
		}

		/*
		 * Check if this new ref would overwrite the first ref of
		 * another unprocessed inode. If yes, orphanize the
		 * overwritten inode. If we find an overwritten ref that is
		 * not the first ref, simply unlink it.
		 */
		ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen,
				cur->name, cur->name_len,
				&ow_inode, &ow_gen);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = is_first_ref(sctx->parent_root,
					   ow_inode, cur->dir, cur->name,
					   cur->name_len);
			if (ret < 0)
				goto out;
			if (ret) {
				struct name_cache_entry *nce;
				struct waiting_dir_move *wdm;

				ret = orphanize_inode(sctx, ow_inode, ow_gen,
						cur->full_path);
				if (ret < 0)
					goto out;

				/*
				 * If ow_inode has its rename operation delayed
				 * make sure that its orphanized name is used in
				 * the source path when performing its rename
				 * operation.
				 */
				if (is_waiting_for_move(sctx, ow_inode)) {
					wdm = get_waiting_dir_move(sctx,
								   ow_inode);
					ASSERT(wdm);
					wdm->orphanized = true;
				}

				/*
				 * Make sure we clear our orphanized inode's
				 * name from the name cache. This is because the
				 * inode ow_inode might be an ancestor of some
				 * other inode that will be orphanized as well
				 * later and has an inode number greater than
				 * sctx->send_progress. We need to prevent
				 * future name lookups from using the old name
				 * and get instead the orphan name.
				 */
				nce = name_cache_search(sctx, ow_inode, ow_gen);
				if (nce) {
					name_cache_delete(sctx, nce);
					kfree(nce);
				}

				/*
				 * ow_inode might currently be an ancestor of
				 * cur_ino, therefore compute valid_path (the
				 * current path of cur_ino) again because it
				 * might contain the pre-orphanization name of
				 * ow_inode, which is no longer valid.
				 */
				fs_path_reset(valid_path);
				ret = get_cur_path(sctx, sctx->cur_ino,
					   sctx->cur_inode_gen, valid_path);
				if (ret < 0)
					goto out;
			} else {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
			ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root &&
		    can_rename) {
			ret = wait_for_parent_move(sctx, cur, is_orphan);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				can_rename = false;
				*pending_move = 1;
			}
		}

		/*
		 * link/move the ref to the new place. If we have an orphan
		 * inode, move it and update valid_path. If not, link or move
		 * it depending on the inode mode.
		 */
		if (is_orphan && can_rename) {
			ret = send_rename(sctx, valid_path, cur->full_path);
			if (ret < 0)
				goto out;
			is_orphan = 0;
			ret = fs_path_copy(valid_path, cur->full_path);
			if (ret < 0)
				goto out;
		} else if (can_rename) {
			if (S_ISDIR(sctx->cur_inode_mode)) {
				/*
				 * Dirs can't be linked, so move it. For moved
				 * dirs, we always have one new and one deleted
				 * ref. The deleted ref is ignored later.
				 */
				ret = send_rename(sctx, valid_path,
						  cur->full_path);
				if (!ret)
					ret = fs_path_copy(valid_path,
							   cur->full_path);
				if (ret < 0)
					goto out;
			} else {
				ret = send_link(sctx, cur->full_path,
						valid_path);
				if (ret < 0)
					goto out;
			}
		}
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	}

	if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) {
		/*
		 * Check if we can already rmdir the directory. If not,
		 * orphanize it. For every dir item inside that gets deleted
		 * later, we do this check again and rmdir it then if possible.
		 * See the use of check_dirs for more details.
		 */
		ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_ino);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = send_rmdir(sctx, valid_path);
			if (ret < 0)
				goto out;
		} else if (!is_orphan) {
			ret = orphanize_inode(sctx, sctx->cur_ino,
					sctx->cur_inode_gen, valid_path);
			if (ret < 0)
				goto out;
			is_orphan = 1;
		}

		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
	} else if (S_ISDIR(sctx->cur_inode_mode) &&
		   !list_empty(&sctx->deleted_refs)) {
		/*
		 * We have a moved dir. Add the old parent to check_dirs
		 */
		cur = list_entry(sctx->deleted_refs.next, struct recorded_ref,
				list);
		ret = dup_ref(cur, &check_dirs);
		if (ret < 0)
			goto out;
	} else if (!S_ISDIR(sctx->cur_inode_mode)) {
		/*
		 * We have a non dir inode. Go through all deleted refs and
		 * unlink them if they were not already overwritten by other
		 * inodes.
		 */
		list_for_each_entry(cur, &sctx->deleted_refs, list) {
			ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino, sctx->cur_inode_gen,
					cur->name, cur->name_len);
			if (ret < 0)
				goto out;
			if (!ret) {
				ret = send_unlink(sctx, cur->full_path);
				if (ret < 0)
					goto out;
			}
			ret = dup_ref(cur, &check_dirs);
			if (ret < 0)
				goto out;
		}
		/*
		 * If the inode is still orphan, unlink the orphan. This may
		 * happen when a previous inode did overwrite the first ref
		 * of this inode and no new refs were added for the current
		 * inode. Unlinking does not mean that the inode is deleted in
		 * all cases. There may still be links to this inode in other
		 * places.
		 */
		if (is_orphan) {
			ret = send_unlink(sctx, valid_path);
			if (ret < 0)
				goto out;
		}
	}

	/*
	 * We did collect all parent dirs where cur_inode was once located. We
	 * now go through all these dirs and check if they are pending for
	 * deletion and if it's finally possible to perform the rmdir now.
	 * We also update the inode stats of the parent dirs here.
	 */
	list_for_each_entry(cur, &check_dirs, list) {
		/*
		 * In case we had refs into dirs that were not processed yet,
		 * we don't need to do the utime and rmdir logic for these dirs.
		 * The dir will be processed later.
		 */
		if (cur->dir > sctx->cur_ino)
			continue;

		ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;

		if (ret == inode_state_did_create ||
		    ret == inode_state_no_change) {
			/* TODO delayed utimes */
			ret = send_utimes(sctx, cur->dir, cur->dir_gen);
			if (ret < 0)
				goto out;
		} else if (ret == inode_state_did_delete &&
			   cur->dir != last_dir_ino_rm) {
			ret = can_rmdir(sctx, cur->dir, cur->dir_gen,
					sctx->cur_ino);
			if (ret < 0)
				goto out;
			if (ret) {
				ret = get_cur_path(sctx, cur->dir,
						   cur->dir_gen, valid_path);
				if (ret < 0)
					goto out;
				ret = send_rmdir(sctx, valid_path);
				if (ret < 0)
					goto out;
				last_dir_ino_rm = cur->dir;
			}
		}
	}

	ret = 0;

out:
	__free_recorded_refs(&check_dirs);
	free_recorded_refs(sctx);
	fs_path_free(valid_path);
	return ret;
}
static int record_ref(struct btrfs_root *root, int num, u64 dir, int index,
		      struct fs_path *name, void *ctx, struct list_head *refs)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	u64 gen;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, dir, gen, p);
	if (ret < 0)
		goto out;
	ret = fs_path_add_path(p, name);
	if (ret < 0)
		goto out;

	ret = __record_ref(refs, dir, gen, p);

out:
	if (ret)
		fs_path_free(p);
	return ret;
}
static int __record_new_ref(int num, u64 dir, int index,
			    struct fs_path *name,
			    void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->send_root, num, dir, index, name,
			  ctx, &sctx->new_refs);
}


static int __record_deleted_ref(int num, u64 dir, int index,
				struct fs_path *name,
				void *ctx)
{
	struct send_ctx *sctx = ctx;
	return record_ref(sctx->parent_root, num, dir, index, name,
			  ctx, &sctx->deleted_refs);
}

static int record_new_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
				sctx->cmp_key, 0, __record_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}

static int record_deleted_ref(struct send_ctx *sctx)
{
	int ret;

	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, 0, __record_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}
struct find_ref_ctx {
	u64 dir;
	u64 dir_gen;
	struct btrfs_root *root;
	struct fs_path *name;
	int found_idx;
};

static int __find_iref(int num, u64 dir, int index,
		       struct fs_path *name,
		       void *ctx_)
{
	struct find_ref_ctx *ctx = ctx_;
	u64 dir_gen;
	int ret;

	if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) &&
	    strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) {
		/*
		 * To avoid doing extra lookups we'll only do this if everything
		 * else matches.
		 */
		ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret)
			return ret;
		if (dir_gen != ctx->dir_gen)
			return 0;
		ctx->found_idx = num;
		return 1;
	}
	return 0;
}

static int find_iref(struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *key,
		     u64 dir, u64 dir_gen, struct fs_path *name)
{
	int ret;
	struct find_ref_ctx ctx;

	ctx.dir = dir;
	ctx.name = name;
	ctx.dir_gen = dir_gen;
	ctx.found_idx = -1;
	ctx.root = root;

	ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;

	return ctx.found_idx;
}
static int __record_changed_new_ref(int num, u64 dir, int index,
				    struct fs_path *name,
				    void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_new_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int __record_changed_deleted_ref(int num, u64 dir, int index,
					struct fs_path *name,
					void *ctx)
{
	u64 dir_gen;
	int ret;
	struct send_ctx *sctx = ctx;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key,
			dir, dir_gen, name);
	if (ret == -ENOENT)
		ret = __record_deleted_ref(num, dir, index, name, sctx);
	else if (ret > 0)
		ret = 0;

	return ret;
}

static int record_changed_ref(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_inode_ref(sctx->send_root, sctx->left_path,
			sctx->cmp_key, 0, __record_changed_new_ref, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_inode_ref(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, 0, __record_changed_deleted_ref, sctx);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	return ret;
}
/*
 * Record and process all refs at once. Needed when an inode changes the
 * generation number, which means that it was deleted and recreated.
 */
static int process_all_refs(struct send_ctx *sctx,
			    enum btrfs_compare_tree_result cmd)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;
	iterate_inode_ref_t cb;
	int pending_move = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	if (cmd == BTRFS_COMPARE_TREE_NEW) {
		root = sctx->send_root;
		cb = __record_new_ref;
	} else if (cmd == BTRFS_COMPARE_TREE_DELETED) {
		root = sctx->parent_root;
		cb = __record_deleted_ref;
	} else {
		btrfs_err(sctx->send_root->fs_info,
				"Wrong command %d in process_all_refs", cmd);
		ret = -EINVAL;
		goto out;
	}

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    (found_key.type != BTRFS_INODE_REF_KEY &&
		     found_key.type != BTRFS_INODE_EXTREF_KEY))
			break;

		ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}
	btrfs_release_path(path);

	ret = process_recorded_refs(sctx, &pending_move);
	/* Only applicable to an incremental send. */
	ASSERT(pending_move == 0);

out:
	btrfs_free_path(path);
	return ret;
}
static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int send_remove_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
static int __process_new_xattr(int num, struct btrfs_key *di_key,
			       const char *name, int name_len,
			       const char *data, int data_len,
			       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;
	posix_acl_xattr_header dummy_acl;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	/*
	 * This hack is needed because empty acls are stored as zero byte
	 * data in xattrs. Problem with that is, that receiving these zero byte
	 * acls will fail later. To fix this, we send a dummy acl list that
	 * only contains the version number and no entries.
	 */
	if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) ||
	    !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) {
		if (data_len == 0) {
			dummy_acl.a_version =
					cpu_to_le32(POSIX_ACL_XATTR_VERSION);
			data = (char *)&dummy_acl;
			data_len = sizeof(dummy_acl);
		}
	}

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_set_xattr(sctx, p, name, name_len, data, data_len);

out:
	fs_path_free(p);
	return ret;
}
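
/*
 * Note (illustrative, assuming POSIX_ACL_XATTR_VERSION == 2 as defined by
 * the kernel headers): the dummy acl sent for a zero length ACL xattr is
 * just the 4 byte little-endian header 02 00 00 00 with no entries
 * following it.
 */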
static int __process_deleted_xattr(int num, struct btrfs_key *di_key,
				   const char *name, int name_len,
				   const char *data, int data_len,
				   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	ret = send_remove_xattr(sctx, p, name, name_len);

out:
	fs_path_free(p);
	return ret;
}
static int process_new_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			       sctx->cmp_key, __process_new_xattr, sctx);

	return ret;
}

static int process_deleted_xattr(struct send_ctx *sctx)
{
	return iterate_dir_item(sctx->parent_root, sctx->right_path,
				sctx->cmp_key, __process_deleted_xattr, sctx);
}
struct find_xattr_ctx {
	const char *name;
	int name_len;
	int found_idx;
	char *found_data;
	int found_data_len;
};

static int __find_xattr(int num, struct btrfs_key *di_key,
			const char *name, int name_len,
			const char *data, int data_len,
			u8 type, void *vctx)
{
	struct find_xattr_ctx *ctx = vctx;

	if (name_len == ctx->name_len &&
	    strncmp(name, ctx->name, name_len) == 0) {
		ctx->found_idx = num;
		ctx->found_data_len = data_len;
		ctx->found_data = kmemdup(data, data_len, GFP_KERNEL);
		if (!ctx->found_data)
			return -ENOMEM;
		return 1;
	}
	return 0;
}

static int find_xattr(struct btrfs_root *root,
		      struct btrfs_path *path,
		      struct btrfs_key *key,
		      const char *name, int name_len,
		      char **data, int *data_len)
{
	int ret;
	struct find_xattr_ctx ctx;

	ctx.name = name;
	ctx.name_len = name_len;
	ctx.found_idx = -1;
	ctx.found_data = NULL;
	ctx.found_data_len = 0;

	ret = iterate_dir_item(root, path, key, __find_xattr, &ctx);
	if (ret < 0)
		return ret;

	if (ctx.found_idx == -1)
		return -ENOENT;
	if (data) {
		*data = ctx.found_data;
		*data_len = ctx.found_data_len;
	} else {
		kfree(ctx.found_data);
	}
	return ctx.found_idx;
}
static int __process_changed_new_xattr(int num, struct btrfs_key *di_key,
				       const char *name, int name_len,
				       const char *data, int data_len,
				       u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;
	char *found_data = NULL;
	int found_data_len = 0;

	ret = find_xattr(sctx->parent_root, sctx->right_path,
			 sctx->cmp_key, name, name_len, &found_data,
			 &found_data_len);
	if (ret == -ENOENT) {
		ret = __process_new_xattr(num, di_key, name, name_len, data,
				data_len, type, ctx);
	} else if (ret >= 0) {
		if (data_len != found_data_len ||
		    memcmp(data, found_data, data_len)) {
			ret = __process_new_xattr(num, di_key, name, name_len,
					data, data_len, type, ctx);
		} else {
			ret = 0;
		}
	}

	kfree(found_data);
	return ret;
}

static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key,
					   const char *name, int name_len,
					   const char *data, int data_len,
					   u8 type, void *ctx)
{
	int ret;
	struct send_ctx *sctx = ctx;

	ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key,
			 name, name_len, NULL, NULL);
	if (ret == -ENOENT)
		ret = __process_deleted_xattr(num, di_key, name, name_len,
				data, data_len, type, ctx);
	else if (ret >= 0)
		ret = 0;

	return ret;
}

static int process_changed_xattr(struct send_ctx *sctx)
{
	int ret = 0;

	ret = iterate_dir_item(sctx->send_root, sctx->left_path,
			sctx->cmp_key, __process_changed_new_xattr, sctx);
	if (ret < 0)
		goto out;
	ret = iterate_dir_item(sctx->parent_root, sctx->right_path,
			sctx->cmp_key, __process_changed_deleted_xattr, sctx);

out:
	return ret;
}
static int process_all_new_xattrs(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	root = sctx->send_root;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_XATTR_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);
		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = iterate_dir_item(root, path, &found_key,
				       __process_new_xattr, sctx);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len)
{
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct page *page;
	char *addr;
	struct btrfs_key key;
	pgoff_t index = offset >> PAGE_SHIFT;
	pgoff_t last_index;
	unsigned pg_offset = offset & ~PAGE_MASK;
	ssize_t ret = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	if (offset + len > i_size_read(inode)) {
		if (offset > i_size_read(inode))
			len = 0;
		else
			len = i_size_read(inode) - offset;
	}
	if (len == 0)
		goto out;

	last_index = (offset + len - 1) >> PAGE_SHIFT;

	/* initial readahead */
	memset(&sctx->ra, 0, sizeof(struct file_ra_state));
	file_ra_state_init(&sctx->ra, inode->i_mapping);
	btrfs_force_ra(inode->i_mapping, &sctx->ra, NULL, index,
		       last_index - index + 1);

	while (index <= last_index) {
		unsigned cur_len = min_t(unsigned, len,
					 PAGE_SIZE - pg_offset);
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			ret = -ENOMEM;
			break;
		}

		if (!PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				unlock_page(page);
				put_page(page);
				ret = -EIO;
				break;
			}
		}

		addr = kmap(page);
		memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len);
		kunmap(page);
		unlock_page(page);
		put_page(page);
		index++;
		pg_offset = 0;
		len -= cur_len;
		ret += cur_len;
	}
out:
	iput(inode);
	return ret;
}
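
/*
 * Worked example (illustrative, not from the original source): with a 4K
 * PAGE_SIZE, a read at offset 5000 with len 6000 yields index 1, pg_offset
 * 904 and last_index 2. The first iteration copies 3192 bytes (4096 - 904)
 * from page 1 and the second copies the remaining 2808 bytes from the start
 * of page 2.
 */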
/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;
	ssize_t num_read = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {
		if (num_read < 0)
			ret = num_read;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	if (ret < 0)
		return ret;
	return num_read;
}
/*
 * Send a clone command to user space.
 */
static int send_clone(struct send_ctx *sctx,
		      u64 offset, u32 len,
		      struct clone_root *clone_root)
{
	int ret = 0;
	struct fs_path *p;
	u64 gen;

	verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
		       "clone_inode=%llu, clone_offset=%llu\n", offset, len,
		       clone_root->root->objectid, clone_root->ino,
		       clone_root->offset);

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);

	if (clone_root->root == sctx->send_root) {
		ret = get_inode_info(sctx->send_root, clone_root->ino, NULL,
				&gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;
		ret = get_cur_path(sctx, clone_root->ino, gen, p);
	} else {
		ret = get_inode_path(clone_root->root, clone_root->ino, p);
	}
	if (ret < 0)
		goto out;

	/*
	 * If the parent we're using has a received_uuid set then use that as
	 * our clone source as that is what we will look for when doing a
	 * receive.
	 *
	 * This covers the case that we create a snapshot off of a received
	 * subvolume and then use that as the parent and try to receive on a
	 * different host.
	 */
	if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid))
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.received_uuid);
	else
		TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID,
			     clone_root->root->root_item.uuid);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID,
		    le64_to_cpu(clone_root->root->root_item.ctransid));
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET,
			clone_root->offset);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}
static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 offset = sctx->cur_inode_last_extent;
	u64 len;
	int ret = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto tlv_put_failure;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
tlv_put_failure:
	fs_path_free(p);
	return ret;
}
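
/*
 * Example (illustrative, assuming BTRFS_SEND_READ_SIZE of 48K as defined in
 * send.h): a 300K hole is emitted as six 48K zero-filled write commands
 * followed by a final 12K one, all reusing the zeroed read_buf.
 */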
static int send_extent_data(struct send_ctx *sctx,
			    const u64 offset,
			    const u64 len)
{
	u64 sent = 0;

	if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA)
		return send_update_extent(sctx, offset, len);

	while (sent < len) {
		u64 size = len - sent;
		int ret;

		if (size > BTRFS_SEND_READ_SIZE)
			size = BTRFS_SEND_READ_SIZE;
		ret = send_write(sctx, offset + sent, size);
		if (ret < 0)
			return ret;
		if (!ret)
			break;
		sent += ret;
	}
	return 0;
}
static int clone_range(struct send_ctx *sctx,
		       struct clone_root *clone_root,
		       const u64 disk_byte,
		       u64 data_offset,
		       u64 offset,
		       u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/*
	 * We can't send a clone operation for the entire range if we find
	 * extent items in the respective range in the source file that
	 * refer to different extents or if we find holes.
	 * So check for that and do a mix of clone and regular write/copy
	 * operations if needed.
	 *
	 * Example:
	 *
	 * mkfs.btrfs -f /dev/sda
	 * mount /dev/sda /mnt
	 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo
	 * cp --reflink=always /mnt/foo /mnt/bar
	 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo
	 * btrfs subvolume snapshot -r /mnt /mnt/snap
	 *
	 * If when we send the snapshot and we are processing file bar (which
	 * has a higher inode number than foo) we blindly send a clone operation
	 * for the [0, 100K[ range from foo to bar, the receiver ends up getting
	 * a file bar that matches the content of file foo - iow, doesn't match
	 * the content from bar in the original filesystem.
	 */
	key.objectid = clone_root->ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = clone_root->offset;
	ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0) {
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1);
		if (key.objectid == clone_root->ino &&
		    key.type == BTRFS_EXTENT_DATA_KEY)
			path->slots[0]--;
	}

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_file_extent_item *ei;
		u8 type;
		u64 ext_len;
		u64 clone_len;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(clone_root->root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);

		/*
		 * We might have an implicit trailing hole (NO_HOLES feature
		 * enabled). We deal with it after leaving this loop.
		 */
		if (key.objectid != clone_root->ino ||
		    key.type != BTRFS_EXTENT_DATA_KEY)
			break;

		ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(leaf, ei);
		if (type == BTRFS_FILE_EXTENT_INLINE) {
			ext_len = btrfs_file_extent_inline_len(leaf, slot, ei);
			ext_len = PAGE_ALIGN(ext_len);
		} else {
			ext_len = btrfs_file_extent_num_bytes(leaf, ei);
		}

		if (key.offset + ext_len <= clone_root->offset)
			goto next;

		if (key.offset > clone_root->offset) {
			/* Implicit hole, NO_HOLES feature enabled. */
			u64 hole_len = key.offset - clone_root->offset;

			if (hole_len > len)
				hole_len = len;
			ret = send_extent_data(sctx, offset, hole_len);
			if (ret < 0)
				goto out;

			len -= hole_len;
			if (!len)
				break;
			offset += hole_len;
			clone_root->offset += hole_len;
			data_offset += hole_len;
		}

		if (key.offset >= clone_root->offset + len)
			break;

		clone_len = min_t(u64, ext_len, len);

		if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte &&
		    btrfs_file_extent_offset(leaf, ei) == data_offset)
			ret = send_clone(sctx, offset, clone_len, clone_root);
		else
			ret = send_extent_data(sctx, offset, clone_len);

		if (ret < 0)
			goto out;

		len -= clone_len;
		if (!len)
			break;
		offset += clone_len;
		clone_root->offset += clone_len;
		data_offset += clone_len;
next:
		path->slots[0]++;
	}

	if (len > 0)
		ret = send_extent_data(sctx, offset, len);
	else
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
static int send_write_or_clone(struct send_ctx *sctx,
			       struct btrfs_path *path,
			       struct btrfs_key *key,
			       struct clone_root *clone_root)
{
	int ret = 0;
	struct btrfs_file_extent_item *ei;
	u64 offset = key->offset;
	u64 len;
	u8 type;
	u64 bs = sctx->send_root->fs_info->sb->s_blocksize;

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		len = btrfs_file_extent_inline_len(path->nodes[0],
						   path->slots[0], ei);
		/*
		 * it is possible the inline item won't cover the whole page,
		 * but there may be items after this page. Make
		 * sure to send the whole thing
		 */
		len = PAGE_ALIGN(len);
	} else {
		len = btrfs_file_extent_num_bytes(path->nodes[0], ei);
	}

	if (offset + len > sctx->cur_inode_size)
		len = sctx->cur_inode_size - offset;
	if (len == 0) {
		ret = 0;
		goto out;
	}

	if (clone_root && IS_ALIGNED(offset + len, bs)) {
		u64 disk_byte;
		u64 data_offset;

		disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
		data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
		ret = clone_range(sctx, clone_root, disk_byte, data_offset,
				  offset, len);
	} else {
		ret = send_extent_data(sctx, offset, len);
	}
out:
	return ret;
}
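
/*
 * Example (illustrative, not from the original source): with a 4K block
 * size, an extent covering the file range [0, 100K[ may be cloned since
 * 100K is block aligned, while an extent ending at the unaligned EOF of a
 * 10000 byte file falls back to send_extent_data(), because the receiver
 * cannot clone a partial block.
 */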
static int is_extent_unchanged(struct send_ctx *sctx,
			       struct btrfs_path *left_path,
			       struct btrfs_key *ekey)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	int slot;
	struct btrfs_key found_key;
	struct btrfs_file_extent_item *ei;
	u64 left_disknr;
	u64 right_disknr;
	u64 left_offset;
	u64 right_offset;
	u64 left_offset_fixed;
	u64 left_len;
	u64 right_len;
	u64 left_gen;
	u64 right_gen;
	u8 left_type;
	u8 right_type;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	eb = left_path->nodes[0];
	slot = left_path->slots[0];
	ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	left_type = btrfs_file_extent_type(eb, ei);

	if (left_type != BTRFS_FILE_EXTENT_REG) {
		ret = 0;
		goto out;
	}
	left_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
	left_len = btrfs_file_extent_num_bytes(eb, ei);
	left_offset = btrfs_file_extent_offset(eb, ei);
	left_gen = btrfs_file_extent_generation(eb, ei);

	/*
	 * Following comments will refer to these graphics. L is the left
	 * extents which we are checking at the moment. 1-8 are the right
	 * extents that we iterate.
	 *
	 *       |-----L-----|
	 * |-1-|-2a-|-3-|-4-|-5-|-6-|
	 *
	 *       |-----L-----|
	 * |--1--|-2b-|...(same as above)
	 *
	 * Alternative situation. Happens on files where extents got split.
	 *       |-----L-----|
	 * |-----------7-----------|-6-|
	 *
	 * Alternative situation. Happens on files which got larger.
	 *       |-----L-----|
	 * |-8-|
	 * Nothing follows after 8.
	 */

	key.objectid = ekey->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = ekey->offset;
	ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Handle special case where the right side has no extents at all.
	 */
	eb = path->nodes[0];
	slot = path->slots[0];
	btrfs_item_key_to_cpu(eb, &found_key, slot);
	if (found_key.objectid != key.objectid ||
	    found_key.type != key.type) {
		/* If we're a hole then just pretend nothing changed */
		ret = (left_disknr) ? 0 : 1;
		goto out;
	}

	/*
	 * We're now on 2a, 2b or 7.
	 */
	key = found_key;
	while (key.offset < ekey->offset + left_len) {
		ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		right_type = btrfs_file_extent_type(eb, ei);
		if (right_type != BTRFS_FILE_EXTENT_REG) {
			ret = 0;
			goto out;
		}

		right_disknr = btrfs_file_extent_disk_bytenr(eb, ei);
		right_len = btrfs_file_extent_num_bytes(eb, ei);
		right_offset = btrfs_file_extent_offset(eb, ei);
		right_gen = btrfs_file_extent_generation(eb, ei);

		/*
		 * Are we at extent 8? If yes, we know the extent is changed.
		 * This may only happen on the first iteration.
		 */
		if (found_key.offset + right_len <= ekey->offset) {
			/* If we're a hole just pretend nothing changed */
			ret = (left_disknr) ? 0 : 1;
			goto out;
		}

		left_offset_fixed = left_offset;
		if (key.offset < ekey->offset) {
			/* Fix the right offset for 2a and 7. */
			right_offset += ekey->offset - key.offset;
		} else {
			/* Fix the left offset for all behind 2a and 2b */
			left_offset_fixed += key.offset - ekey->offset;
		}

		/*
		 * Check if we have the same extent.
		 */
		if (left_disknr != right_disknr ||
		    left_offset_fixed != right_offset ||
		    left_gen != right_gen) {
			ret = 0;
			goto out;
		}

		/*
		 * Go to the next extent.
		 */
		ret = btrfs_next_item(sctx->parent_root, path);
		if (ret < 0)
			goto out;
		if (!ret) {
			eb = path->nodes[0];
			slot = path->slots[0];
			btrfs_item_key_to_cpu(eb, &found_key, slot);
		}
		if (ret || found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			key.offset += right_len;
			break;
		}
		if (found_key.offset != key.offset + right_len) {
			ret = 0;
			goto out;
		}
		key = found_key;
	}

	/*
	 * We're now behind the left extent (treat as unchanged) or at the end
	 * of the right side (treat as changed).
	 */
	if (key.offset >= ekey->offset + left_len)
		ret = 1;
	else
		ret = 0;

out:
	btrfs_free_path(path);
	return ret;
}
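
/*
 * Worked example (illustrative, not from the original source): if L covers
 * [100K, 164K[ and the parent snapshot has an extent starting at 96K (case
 * 2a), the loop adds the 4K difference to right_offset before comparing the
 * disknr/offset/generation triples; once key.offset advances to or past
 * 164K with every item matching, the extent is reported unchanged (ret 1).
 */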
static int get_last_extent(struct send_ctx *sctx, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_root *root = sctx->send_root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;
	u8 type;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	sctx->cur_inode_last_extent = 0;

	key.objectid = sctx->cur_ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;
	ret = btrfs_search_slot_for_read(root, &key, path, 0, 1);
	if (ret < 0)
		goto out;
	ret = 0;
	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY)
		goto out;

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key.offset + size,
				   sctx->send_root->sectorsize);
	} else {
		extent_end = key.offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}
	sctx->cur_inode_last_extent = extent_end;
out:
	btrfs_free_path(path);
	return ret;
}
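
/*
 * Example (illustrative, not from the original source): for an inline
 * extent of 1000 bytes at offset 0 on a filesystem with a 4K sectorsize,
 * cur_inode_last_extent becomes ALIGN(0 + 1000, 4096) == 4096; for a
 * regular 64K extent at offset 0 it becomes 64K.
 */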
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 type;
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key->offset + size,
				   sctx->send_root->sectorsize);
	} else {
		extent_end = key->offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leafs that contained only
		 * file extent items for our current inode. These leafs have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leafs.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (sctx->cur_inode_last_extent < key->offset)
		ret = send_hole(sctx, key->offset);
	sctx->cur_inode_last_extent = extent_end;
	return ret;
}
static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
			sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}
static int process_all_extents(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}
static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chmod = 0;
	int need_chown = 0;
	int pending_move = 0;
	int refs_processed = 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			&left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
	} else {
		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
				NULL, NULL, &right_mode, &right_uid,
				&right_gid, NULL);
		if (ret < 0)
			goto out;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1 ||
			    sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				sctx->cur_inode_size);
		if (ret < 0)
			goto out;
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				left_mode);
		if (ret < 0)
			goto out;
	}

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
		/*
		 * Need to send that every time, no matter if it actually
		 * changed between the two trees as we have done changes to
		 * the inode before. If our inode is a directory and it's
		 * waiting to be moved/renamed, we will send its utimes when
		 * it's moved/renamed, therefore we don't need to do it here.
		 */
		sctx->send_progress = sctx->cur_ino + 1;
		ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}

static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_rdev = btrfs_inode_rdev(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and a new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_rdev = btrfs_inode_rdev(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}
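
/*
 * Illustrative example for the cur_inode_new_gen path above; the inode
 * number, generations and names are hypothetical. If, between the parent
 * snapshot and the send snapshot, an inode was deleted and its objectid was
 * reused with a higher generation:
 *
 *	parent snapshot: inode 259, gen 10, at "/foo"
 *	send snapshot:   inode 259, gen 11, at "/bar"
 *
 * compare_trees reports a single BTRFS_COMPARE_TREE_CHANGED inode item, but
 * the stream must first remove every path of the old inode and then create
 * the new one, which is why the code above runs a full "deleted" phase
 * followed by a full "new" phase for the same objectid.
 */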

/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode has already initiated processing of refs. The reason for this
 * is that in this case, compare_tree actually compares the refs of two
 * different inodes. To fix this, process_all_refs is used in changed_inode to
 * handle all refs of the right tree as deleted and all refs of the left tree
 * as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}
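
/*
 * Sketch of the ordering problem described above (names are hypothetical):
 * a rename of inode 258 from "a" to "b" arrives from compare_trees as a
 * deleted ref "a" and a new ref "b", in key order rather than in a safe
 * apply order. If the deletion were applied first, the inode would
 * temporarily have no path at all. Recording both refs and letting
 * process_recorded_refs emit the new name before dropping the old one
 * avoids that window.
 */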

/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode has already initiated
 * processing of xattrs. The reason is the same as in changed_ref().
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode has already initiated
 * processing of extents. The reason is the same as in changed_ref().
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}

static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}

static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	int ret = 0;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}
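
/*
 * For reference, a sketch of the BTRFS_INODE_EXTREF_KEY item layout the
 * loop above walks (assuming the on-disk format from ctree.h):
 *
 *	struct btrfs_inode_extref {
 *		__le64 parent_objectid;	// the dirid extracted above
 *		__le64 index;
 *		__le16 name_len;	// length of the name that follows
 *		__u8   name[];
 *	} __attribute__((packed));
 *
 * Entries are packed back to back inside one item, so the loop advances by
 * sizeof(*extref) + name_len until item_size is consumed, checking each
 * distinct parent dirid with dir_changed().
 */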

/*
 * Updates compare related fields in sctx and simply forwards to the actual
 * changed_xxx functions.
 */
static int changed_cb(struct btrfs_root *left_root,
		      struct btrfs_root *right_root,
		      struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      void *ctx)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY)
		ret = changed_inode(sctx, result);
	else if (key->type == BTRFS_INODE_REF_KEY ||
		 key->type == BTRFS_INODE_EXTREF_KEY)
		ret = changed_ref(sctx, result);
	else if (key->type == BTRFS_XATTR_ITEM_KEY)
		ret = changed_xattr(sctx, result);
	else if (key->type == BTRFS_EXTENT_DATA_KEY)
		ret = changed_extent(sctx, result);

out:
	return ret;
}

static int full_send_tree(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *send_root = sctx->send_root;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct extent_buffer *eb;
	int slot;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = BTRFS_FIRST_FREE_OBJECTID;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (ret)
		goto out_finish;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(eb, &found_key, slot);

		ret = changed_cb(send_root, NULL, path, NULL,
				&found_key, BTRFS_COMPARE_TREE_NEW, sctx);
		if (ret < 0)
			goto out;

		key.objectid = found_key.objectid;
		key.type = found_key.type;
		key.offset = found_key.offset + 1;

		ret = btrfs_next_item(send_root, path);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			break;
		}
	}

out_finish:
	ret = finish_inode_if_needed(sctx, 1);

out:
	btrfs_free_path(path);
	return ret;
}
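
/*
 * For orientation: full_send_tree() implements the non-incremental case,
 * i.e. roughly what a plain "btrfs send <snapshot>" (without a -p parent)
 * ends up running. Every item of the send root is visited in key order and
 * handed to changed_cb() as BTRFS_COMPARE_TREE_NEW, so the subvolume is
 * effectively diffed against an implicit empty parent.
 */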

static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
				changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}
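
/*
 * Rough shape of the resulting stream (a sketch; the exact encoding is
 * defined by the send stream format):
 *
 *	[stream header]		unless BTRFS_SEND_FLAG_OMIT_STREAM_HEADER
 *	[subvol/snapshot cmd]	emitted by send_subvol_begin()
 *	[create/rename/write/clone/chmod/... commands]
 *	[BTRFS_SEND_C_END]	appended by btrfs_ioctl_send() below unless
 *				BTRFS_SEND_FLAG_OMIT_END_CMD is set
 */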

/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans, sctx->send_root);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans, sctx->send_root);
}

static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			"send_in_progress unbalanced %d root %llu",
			root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}
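
/*
 * Userspace side, for orientation: a minimal sketch of how this ioctl is
 * driven (error handling omitted; the fds and objectids are hypothetical):
 *
 *	struct btrfs_ioctl_send_args args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.send_fd = pipe_wr_fd;		// the stream is written here
 *	args.parent_root = parent_objectid;	// 0 for a full send
 *	args.clone_sources = root_ids;		// u64 array of clone root ids
 *	args.clone_sources_count = n;
 *	ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
 */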

long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
{
	int ret = 0;
	struct btrfs_root *send_root;
	struct btrfs_root *clone_root;
	struct btrfs_fs_info *fs_info;
	struct btrfs_ioctl_send_args *arg = NULL;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	unsigned alloc_size;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	send_root = BTRFS_I(file_inode(mnt_file))->root;
	fs_info = send_root->fs_info;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we lookup the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not read-only.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	arg = memdup_user(arg_, sizeof(*arg));
	if (IS_ERR(arg)) {
		ret = PTR_ERR(arg);
		arg = NULL;
		goto out;
	}

	if (arg->clone_sources_count >
	    ULLONG_MAX / sizeof(*arg->clone_sources)) {
		ret = -EINVAL;
		goto out;
	}

	if (!access_ok(VERIFY_READ, arg->clone_sources,
			sizeof(*arg->clone_sources) *
			arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible, if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = kmalloc(sctx->send_max_size, GFP_KERNEL | __GFP_NOWARN);
	if (!sctx->send_buf) {
		sctx->send_buf = vmalloc(sctx->send_max_size);
		if (!sctx->send_buf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	sctx->read_buf = kmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL | __GFP_NOWARN);
	if (!sctx->read_buf) {
		sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
		if (!sctx->read_buf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);

	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN);
	if (!sctx->clone_roots) {
		sctx->clone_roots = vzalloc(alloc_size);
		if (!sctx->clone_roots) {
			ret = -ENOMEM;
			goto out;
		}
	}

	alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);

	if (arg->clone_sources_count) {
		clone_sources_tmp = kmalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN);
		if (!clone_sources_tmp) {
			clone_sources_tmp = vmalloc(alloc_size);
			if (!clone_sources_tmp) {
				ret = -ENOMEM;
				goto out;
			}
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
				btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
			sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
			NULL);
	sort_clone_roots = 1;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kfree(arg);
	kvfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		kvfree(sctx->clone_roots);
		kvfree(sctx->send_buf);
		kvfree(sctx->read_buf);

		name_cache_free(sctx);

		kfree(sctx);
	}

	return ret;
}