/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/bsearch.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "btrfs_inode.h"
#include "transaction.h"
static int g_verbose = 0;

#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)
/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
			unsigned short buf_len:15;
			unsigned short reversed:1;
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and higher chance to satisfy
		 * an allocation later during send.
		 */
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
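
/*
 * Illustrative sketch, not part of the original code: typical use of the
 * fs_path helpers defined below. The helper names are the real ones from
 * this file, the flow is simplified and error handling abbreviated.
 *
 *	struct fs_path *p = fs_path_alloc();
 *	if (!p)
 *		return -ENOMEM;
 *	ret = fs_path_add(p, "dir", 3);
 *	if (!ret)
 *		ret = fs_path_add(p, "file", 4);
 *	// p->start now points to the string "dir/file"
 *	fs_path_free(p);
 *
 * A path created with fs_path_alloc_reversed() grows to the left instead
 * (each new element is prepended), which is how paths are assembled while
 * walking from an inode up to the subvolume root; fs_path_unreverse() then
 * moves the finished string to the front of the buffer.
 */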
/* reused for each extent */
	struct btrfs_root *root;
#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)
	struct file *send_filp;

	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;
	/*
	 * Info about the currently processed inode. In case of deleted inodes,
	 * these are the values from the deleted inode.
	 */
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_last_extent;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	/*
	 * We process inodes by their increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with lower inode number when we finish processing it - we
	 * must process the directory with higher inode number first, then
	 * rename/move it and then rename/move the directory with lower inode
	 * number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has lower inode number, but we can't move it (2nd mv operation)
	 * before we move "d", which has higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */
	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;
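
/*
 * Illustrative note, not part of the original code: in the situation described
 * above, the rename of the directory with the lower inode number cannot be
 * emitted when that directory is processed, so it is recorded instead,
 * roughly:
 *
 *	add_pending_dir_move(sctx, parent_ino);	// keyed by the new parent
 *	// from now on is_waiting_for_move(sctx, ino) returns 1 for this dir
 *
 * Once the new parent has been processed and moved/renamed itself,
 * apply_children_dir_moves() picks the entry up from pending_dir_moves and
 * performs the queued rename.
 */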
struct pending_dir_move {
	struct list_head list;
	struct list_head update_refs;

struct waiting_dir_move {

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	int need_later_update;
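
/*
 * Illustrative sketch, not part of the original code, of the clash handling
 * described above: the radix tree is keyed with the inum cast to unsigned
 * long, so on 32bit kernels two different 64bit inums can land in the same
 * slot. Each slot therefore stores a list head and the entries hang off it
 * via radix_list:
 *
 *	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
 *	list_for_each_entry(cur, nce_head, radix_list)
 *		if (cur->ino == ino && cur->gen == gen)
 *			return cur;	// exact 64bit inum + generation match
 *
 * This is essentially what name_cache_search() further down does.
 */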
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}
static void fs_path_reset(struct fs_path *p)
{
	p->start = p->buf + p->buf_len - 1;
static struct fs_path *fs_path_alloc(void)
{
	p = kmalloc(sizeof(*p), GFP_NOFS);
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
static struct fs_path *fs_path_alloc_reversed(void)

static void fs_path_free(struct fs_path *p)
{
	if (p->buf != p->inline_buf)

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}
static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	if (p->buf_len >= len)

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		p->buf = kmalloc(len, GFP_NOFS);
		/*
		 * The real size of the buffer is bigger, this will let the
		 * fast path happen most of the time
		 */
		p->buf_len = ksize(p->buf);

		tmp = krealloc(p->buf, len, GFP_NOFS);

		p->buf_len = ksize(p->buf);

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	tmp_buf = p->buf + old_buf_len - path_len - 1;
	p->end = p->buf + p->buf_len - 1;
	p->start = p->end - path_len;
	memmove(p->start, tmp_buf, path_len + 1);

	p->end = p->start + path_len;
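
/*
 * Illustrative note, not part of the original code: fs_path_ensure_buf()
 * above records the slab's real object size instead of the requested length,
 * e.g.
 *
 *	p->buf = kmalloc(130, GFP_NOFS);
 *	p->buf_len = ksize(p->buf);	// typically 192, not 130
 *
 * so later calls that still fit into the same slab object take the
 * "p->buf_len >= len" fast path and avoid reallocating.
 */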
static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	new_len = p->end - p->start + name_len;
	if (p->start != p->end)

	ret = fs_path_ensure_buf(p, new_len);

	if (p->start != p->end)
	p->start -= name_len;
	*prepared = p->start;

	if (p->start != p->end)
static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	memcpy(prepared, name, name_len);
static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	memcpy(prepared, p2->start, p2->end - p2->start);
static int fs_path_add_from_extent_buffer(struct fs_path *p,
					   struct extent_buffer *eb,
					   unsigned long off, int len)
{
	ret = fs_path_prepare_for_add(p, len, &prepared);
	read_extent_buffer(eb, prepared, off, len);
static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	p->reversed = from->reversed;
	ret = fs_path_add_path(p, from);
static void fs_path_unreverse(struct fs_path *p)
{
	len = p->end - p->start;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	path->search_commit_root = 1;
	path->skip_locking = 1;
static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
	/* TODO handle that correctly */
	/*if (ret == -ERESTARTSYS) {
static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;
#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)
static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	return tlv_put(sctx, attr, str, len);

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}
#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);
static int send_cmd(struct send_ctx *sctx)
{
	struct btrfs_cmd_header *hdr;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));

	crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
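
/*
 * Illustrative sketch, not part of the original code: a complete command is
 * assembled with begin_cmd(), a series of TLV_PUT_* calls and a final
 * send_cmd(), which patches the length and crc32c into the command header
 * and writes the buffer out. The send_truncate() helper further down follows
 * this pattern:
 *
 *	ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE);
 *	if (ret < 0)
 *		goto out;
 *	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
 *	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size);
 *	ret = send_cmd(sctx);
 *
 * tlv_put_failure:
 * out:
 *	...
 */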
625 * Sends a move instruction to user space
627 static int send_rename(struct send_ctx
*sctx
,
628 struct fs_path
*from
, struct fs_path
*to
)
632 verbose_printk("btrfs: send_rename %s -> %s\n", from
->start
, to
->start
);
634 ret
= begin_cmd(sctx
, BTRFS_SEND_C_RENAME
);
638 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, from
);
639 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_TO
, to
);
641 ret
= send_cmd(sctx
);
649 * Sends a link instruction to user space
651 static int send_link(struct send_ctx
*sctx
,
652 struct fs_path
*path
, struct fs_path
*lnk
)
656 verbose_printk("btrfs: send_link %s -> %s\n", path
->start
, lnk
->start
);
658 ret
= begin_cmd(sctx
, BTRFS_SEND_C_LINK
);
662 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
663 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_LINK
, lnk
);
665 ret
= send_cmd(sctx
);
673 * Sends an unlink instruction to user space
675 static int send_unlink(struct send_ctx
*sctx
, struct fs_path
*path
)
679 verbose_printk("btrfs: send_unlink %s\n", path
->start
);
681 ret
= begin_cmd(sctx
, BTRFS_SEND_C_UNLINK
);
685 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
687 ret
= send_cmd(sctx
);
695 * Sends a rmdir instruction to user space
697 static int send_rmdir(struct send_ctx
*sctx
, struct fs_path
*path
)
701 verbose_printk("btrfs: send_rmdir %s\n", path
->start
);
703 ret
= begin_cmd(sctx
, BTRFS_SEND_C_RMDIR
);
707 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, path
);
709 ret
= send_cmd(sctx
);
717 * Helper function to retrieve some fields from an inode item.
719 static int get_inode_info(struct btrfs_root
*root
,
720 u64 ino
, u64
*size
, u64
*gen
,
721 u64
*mode
, u64
*uid
, u64
*gid
,
725 struct btrfs_inode_item
*ii
;
726 struct btrfs_key key
;
727 struct btrfs_path
*path
;
729 path
= alloc_path_for_send();
734 key
.type
= BTRFS_INODE_ITEM_KEY
;
736 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
744 ii
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
745 struct btrfs_inode_item
);
747 *size
= btrfs_inode_size(path
->nodes
[0], ii
);
749 *gen
= btrfs_inode_generation(path
->nodes
[0], ii
);
751 *mode
= btrfs_inode_mode(path
->nodes
[0], ii
);
753 *uid
= btrfs_inode_uid(path
->nodes
[0], ii
);
755 *gid
= btrfs_inode_gid(path
->nodes
[0], ii
);
757 *rdev
= btrfs_inode_rdev(path
->nodes
[0], ii
);
760 btrfs_free_path(path
);
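
/*
 * Illustrative note, not part of the original code: callers that only need a
 * subset of the inode fields pass NULL for the rest, e.g. to fetch just the
 * generation from the parent snapshot:
 *
 *	ret = get_inode_info(sctx->parent_root, ino, NULL, &gen,
 *			     NULL, NULL, NULL, NULL);
 *	if (ret < 0 && ret != -ENOENT)
 *		goto out;
 *
 * -ENOENT simply means the inode does not exist in that root, which several
 * callers below treat as a normal case rather than an error.
 */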
764 typedef int (*iterate_inode_ref_t
)(int num
, u64 dir
, int index
,
/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
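
/*
 * Illustrative sketch, not part of the original code: a minimal callback
 * matching iterate_inode_ref_t, modeled on __copy_first_ref() further down
 * (the name copy_first_name is made up for the example):
 *
 *	static int copy_first_name(int num, u64 dir, int index,
 *				   struct fs_path *p, void *ctx)
 *	{
 *		struct fs_path *dest = ctx;
 *
 *		if (num > 0)
 *			return 1;	// non-zero stops the iteration
 *		return fs_path_copy(dest, p);
 *	}
 *
 *	ret = iterate_inode_ref(root, path, &found_key, 1,
 *				copy_first_name, dest);
 */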
776 static int iterate_inode_ref(struct btrfs_root
*root
, struct btrfs_path
*path
,
777 struct btrfs_key
*found_key
, int resolve
,
778 iterate_inode_ref_t iterate
, void *ctx
)
780 struct extent_buffer
*eb
= path
->nodes
[0];
781 struct btrfs_item
*item
;
782 struct btrfs_inode_ref
*iref
;
783 struct btrfs_inode_extref
*extref
;
784 struct btrfs_path
*tmp_path
;
788 int slot
= path
->slots
[0];
795 unsigned long name_off
;
796 unsigned long elem_size
;
799 p
= fs_path_alloc_reversed();
803 tmp_path
= alloc_path_for_send();
810 if (found_key
->type
== BTRFS_INODE_REF_KEY
) {
811 ptr
= (unsigned long)btrfs_item_ptr(eb
, slot
,
812 struct btrfs_inode_ref
);
813 item
= btrfs_item_nr(slot
);
814 total
= btrfs_item_size(eb
, item
);
815 elem_size
= sizeof(*iref
);
817 ptr
= btrfs_item_ptr_offset(eb
, slot
);
818 total
= btrfs_item_size_nr(eb
, slot
);
819 elem_size
= sizeof(*extref
);
822 while (cur
< total
) {
825 if (found_key
->type
== BTRFS_INODE_REF_KEY
) {
826 iref
= (struct btrfs_inode_ref
*)(ptr
+ cur
);
827 name_len
= btrfs_inode_ref_name_len(eb
, iref
);
828 name_off
= (unsigned long)(iref
+ 1);
829 index
= btrfs_inode_ref_index(eb
, iref
);
830 dir
= found_key
->offset
;
832 extref
= (struct btrfs_inode_extref
*)(ptr
+ cur
);
833 name_len
= btrfs_inode_extref_name_len(eb
, extref
);
834 name_off
= (unsigned long)&extref
->name
;
835 index
= btrfs_inode_extref_index(eb
, extref
);
836 dir
= btrfs_inode_extref_parent(eb
, extref
);
840 start
= btrfs_ref_to_path(root
, tmp_path
, name_len
,
844 ret
= PTR_ERR(start
);
847 if (start
< p
->buf
) {
			/* overflow, try again with larger buffer */
849 ret
= fs_path_ensure_buf(p
,
850 p
->buf_len
+ p
->buf
- start
);
853 start
= btrfs_ref_to_path(root
, tmp_path
,
858 ret
= PTR_ERR(start
);
861 BUG_ON(start
< p
->buf
);
865 ret
= fs_path_add_from_extent_buffer(p
, eb
, name_off
,
871 cur
+= elem_size
+ name_len
;
872 ret
= iterate(num
, dir
, index
, p
, ctx
);
879 btrfs_free_path(tmp_path
);
884 typedef int (*iterate_dir_item_t
)(int num
, struct btrfs_key
*di_key
,
885 const char *name
, int name_len
,
886 const char *data
, int data_len
,
/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
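
/*
 * Illustrative sketch, not part of the original code: a callback matching
 * iterate_dir_item_t. The trailing parameters (item type and opaque context
 * pointer) follow the call made inside iterate_dir_item() below; the name
 * count_entries is made up for the example:
 *
 *	static int count_entries(int num, struct btrfs_key *di_key,
 *				 const char *name, int name_len,
 *				 const char *data, int data_len,
 *				 u8 type, void *ctx)
 *	{
 *		int *count = ctx;
 *
 *		(*count)++;
 *		return 0;	// keep iterating, non-zero would stop
 *	}
 */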
896 static int iterate_dir_item(struct btrfs_root
*root
, struct btrfs_path
*path
,
897 struct btrfs_key
*found_key
,
898 iterate_dir_item_t iterate
, void *ctx
)
901 struct extent_buffer
*eb
;
902 struct btrfs_item
*item
;
903 struct btrfs_dir_item
*di
;
904 struct btrfs_key di_key
;
906 const int buf_len
= PATH_MAX
;
916 buf
= kmalloc(buf_len
, GFP_NOFS
);
923 slot
= path
->slots
[0];
924 item
= btrfs_item_nr(slot
);
925 di
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
928 total
= btrfs_item_size(eb
, item
);
931 while (cur
< total
) {
932 name_len
= btrfs_dir_name_len(eb
, di
);
933 data_len
= btrfs_dir_data_len(eb
, di
);
934 type
= btrfs_dir_type(eb
, di
);
935 btrfs_dir_item_key_to_cpu(eb
, di
, &di_key
);
940 if (name_len
+ data_len
> buf_len
) {
945 read_extent_buffer(eb
, buf
, (unsigned long)(di
+ 1),
946 name_len
+ data_len
);
948 len
= sizeof(*di
) + name_len
+ data_len
;
949 di
= (struct btrfs_dir_item
*)((char *)di
+ len
);
952 ret
= iterate(num
, &di_key
, buf
, name_len
, buf
+ name_len
,
953 data_len
, type
, ctx
);
969 static int __copy_first_ref(int num
, u64 dir
, int index
,
970 struct fs_path
*p
, void *ctx
)
973 struct fs_path
*pt
= ctx
;
975 ret
= fs_path_copy(pt
, p
);
979 /* we want the first only */
/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
987 static int get_inode_path(struct btrfs_root
*root
,
988 u64 ino
, struct fs_path
*path
)
991 struct btrfs_key key
, found_key
;
992 struct btrfs_path
*p
;
994 p
= alloc_path_for_send();
1001 key
.type
= BTRFS_INODE_REF_KEY
;
1004 ret
= btrfs_search_slot_for_read(root
, &key
, p
, 1, 0);
1011 btrfs_item_key_to_cpu(p
->nodes
[0], &found_key
, p
->slots
[0]);
1012 if (found_key
.objectid
!= ino
||
1013 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
1014 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
)) {
1019 ret
= iterate_inode_ref(root
, p
, &found_key
, 1,
1020 __copy_first_ref
, path
);
1030 struct backref_ctx
{
1031 struct send_ctx
*sctx
;
1033 /* number of total found references */
1037 * used for clones found in send_root. clones found behind cur_objectid
1038 * and cur_offset are not considered as allowed clones.
1043 /* may be truncated in case it's the last extent in a file */
1046 /* Just to check for bugs in backref resolving */
1050 static int __clone_root_cmp_bsearch(const void *key
, const void *elt
)
1052 u64 root
= (u64
)(uintptr_t)key
;
1053 struct clone_root
*cr
= (struct clone_root
*)elt
;
1055 if (root
< cr
->root
->objectid
)
1057 if (root
> cr
->root
->objectid
)
1062 static int __clone_root_cmp_sort(const void *e1
, const void *e2
)
1064 struct clone_root
*cr1
= (struct clone_root
*)e1
;
1065 struct clone_root
*cr2
= (struct clone_root
*)e2
;
1067 if (cr1
->root
->objectid
< cr2
->root
->objectid
)
1069 if (cr1
->root
->objectid
> cr2
->root
->objectid
)
1075 * Called for every backref that is found for the current extent.
1076 * Results are collected in sctx->clone_roots->ino/offset/found_refs
1078 static int __iterate_backrefs(u64 ino
, u64 offset
, u64 root
, void *ctx_
)
1080 struct backref_ctx
*bctx
= ctx_
;
1081 struct clone_root
*found
;
1085 /* First check if the root is in the list of accepted clone sources */
1086 found
= bsearch((void *)(uintptr_t)root
, bctx
->sctx
->clone_roots
,
1087 bctx
->sctx
->clone_roots_cnt
,
1088 sizeof(struct clone_root
),
1089 __clone_root_cmp_bsearch
);
1093 if (found
->root
== bctx
->sctx
->send_root
&&
1094 ino
== bctx
->cur_objectid
&&
1095 offset
== bctx
->cur_offset
) {
1096 bctx
->found_itself
= 1;
	 * There are inodes that have extents that lie behind their i_size. Don't
	 * accept clones from these extents.
1103 ret
= get_inode_info(found
->root
, ino
, &i_size
, NULL
, NULL
, NULL
, NULL
,
1108 if (offset
+ bctx
->extent_len
> i_size
)
1112 * Make sure we don't consider clones from send_root that are
1113 * behind the current inode/offset.
1115 if (found
->root
== bctx
->sctx
->send_root
) {
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
1122 if (ino
>= bctx
->cur_objectid
)
1125 if (ino
> bctx
->cur_objectid
)
1127 if (offset
+ bctx
->extent_len
> bctx
->cur_offset
)
1133 found
->found_refs
++;
1134 if (ino
< found
->ino
) {
1136 found
->offset
= offset
;
1137 } else if (found
->ino
== ino
) {
		 * same extent found more than once in the same file.
1141 if (found
->offset
> offset
+ bctx
->extent_len
)
1142 found
->offset
= offset
;
1149 * Given an inode, offset and extent item, it finds a good clone for a clone
1150 * instruction. Returns -ENOENT when none could be found. The function makes
1151 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
1155 * path must point to the extent item when called.
1157 static int find_extent_clone(struct send_ctx
*sctx
,
1158 struct btrfs_path
*path
,
1159 u64 ino
, u64 data_offset
,
1161 struct clone_root
**found
)
1168 u64 extent_item_pos
;
1170 struct btrfs_file_extent_item
*fi
;
1171 struct extent_buffer
*eb
= path
->nodes
[0];
1172 struct backref_ctx
*backref_ctx
= NULL
;
1173 struct clone_root
*cur_clone_root
;
1174 struct btrfs_key found_key
;
1175 struct btrfs_path
*tmp_path
;
1179 tmp_path
= alloc_path_for_send();
1183 backref_ctx
= kmalloc(sizeof(*backref_ctx
), GFP_NOFS
);
1189 if (data_offset
>= ino_size
) {
1191 * There may be extents that lie behind the file's size.
1192 * I at least had this in combination with snapshotting while
1193 * writing large files.
1199 fi
= btrfs_item_ptr(eb
, path
->slots
[0],
1200 struct btrfs_file_extent_item
);
1201 extent_type
= btrfs_file_extent_type(eb
, fi
);
1202 if (extent_type
== BTRFS_FILE_EXTENT_INLINE
) {
1206 compressed
= btrfs_file_extent_compression(eb
, fi
);
1208 num_bytes
= btrfs_file_extent_num_bytes(eb
, fi
);
1209 disk_byte
= btrfs_file_extent_disk_bytenr(eb
, fi
);
1210 if (disk_byte
== 0) {
1214 logical
= disk_byte
+ btrfs_file_extent_offset(eb
, fi
);
1216 ret
= extent_from_logical(sctx
->send_root
->fs_info
, disk_byte
, tmp_path
,
1217 &found_key
, &flags
);
1218 btrfs_release_path(tmp_path
);
1222 if (flags
& BTRFS_EXTENT_FLAG_TREE_BLOCK
) {
1228 * Setup the clone roots.
1230 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++) {
1231 cur_clone_root
= sctx
->clone_roots
+ i
;
1232 cur_clone_root
->ino
= (u64
)-1;
1233 cur_clone_root
->offset
= 0;
1234 cur_clone_root
->found_refs
= 0;
1237 backref_ctx
->sctx
= sctx
;
1238 backref_ctx
->found
= 0;
1239 backref_ctx
->cur_objectid
= ino
;
1240 backref_ctx
->cur_offset
= data_offset
;
1241 backref_ctx
->found_itself
= 0;
1242 backref_ctx
->extent_len
= num_bytes
;
1245 * The last extent of a file may be too large due to page alignment.
1246 * We need to adjust extent_len in this case so that the checks in
1247 * __iterate_backrefs work.
1249 if (data_offset
+ num_bytes
>= ino_size
)
1250 backref_ctx
->extent_len
= ino_size
- data_offset
;
1253 * Now collect all backrefs.
1255 if (compressed
== BTRFS_COMPRESS_NONE
)
1256 extent_item_pos
= logical
- found_key
.objectid
;
1258 extent_item_pos
= 0;
1259 ret
= iterate_extent_inodes(sctx
->send_root
->fs_info
,
1260 found_key
.objectid
, extent_item_pos
, 1,
1261 __iterate_backrefs
, backref_ctx
);
1266 if (!backref_ctx
->found_itself
) {
1267 /* found a bug in backref code? */
1269 btrfs_err(sctx
->send_root
->fs_info
, "did not find backref in "
1270 "send_root. inode=%llu, offset=%llu, "
1271 "disk_byte=%llu found extent=%llu\n",
1272 ino
, data_offset
, disk_byte
, found_key
.objectid
);
	verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
			"ino=%llu, num_bytes=%llu, logical=%llu\n",
			data_offset, ino, num_bytes, logical);
1281 if (!backref_ctx
->found
)
1282 verbose_printk("btrfs: no clones found\n");
1284 cur_clone_root
= NULL
;
1285 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++) {
1286 if (sctx
->clone_roots
[i
].found_refs
) {
1287 if (!cur_clone_root
)
1288 cur_clone_root
= sctx
->clone_roots
+ i
;
1289 else if (sctx
->clone_roots
[i
].root
== sctx
->send_root
)
1290 /* prefer clones from send_root over others */
1291 cur_clone_root
= sctx
->clone_roots
+ i
;
1296 if (cur_clone_root
) {
1297 if (compressed
!= BTRFS_COMPRESS_NONE
) {
1299 * Offsets given by iterate_extent_inodes() are relative
1300 * to the start of the extent, we need to add logical
1301 * offset from the file extent item.
1302 * (See why at backref.c:check_extent_in_eb())
1304 cur_clone_root
->offset
+= btrfs_file_extent_offset(eb
,
1307 *found
= cur_clone_root
;
1314 btrfs_free_path(tmp_path
);
1319 static int read_symlink(struct btrfs_root
*root
,
1321 struct fs_path
*dest
)
1324 struct btrfs_path
*path
;
1325 struct btrfs_key key
;
1326 struct btrfs_file_extent_item
*ei
;
1332 path
= alloc_path_for_send();
1337 key
.type
= BTRFS_EXTENT_DATA_KEY
;
1339 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
1344 ei
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1345 struct btrfs_file_extent_item
);
1346 type
= btrfs_file_extent_type(path
->nodes
[0], ei
);
1347 compression
= btrfs_file_extent_compression(path
->nodes
[0], ei
);
1348 BUG_ON(type
!= BTRFS_FILE_EXTENT_INLINE
);
1349 BUG_ON(compression
);
1351 off
= btrfs_file_extent_inline_start(ei
);
1352 len
= btrfs_file_extent_inline_len(path
->nodes
[0], path
->slots
[0], ei
);
1354 ret
= fs_path_add_from_extent_buffer(dest
, path
->nodes
[0], off
, len
);
1357 btrfs_free_path(path
);
1362 * Helper function to generate a file name that is unique in the root of
1363 * send_root and parent_root. This is used to generate names for orphan inodes.
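
/*
 * Illustrative note, not part of the original code: judging from the
 * snprintf() below, the generated names follow an "o<ino>-<gen>-<counter>"
 * pattern, e.g. roughly "o260-7-0" for inode 260 with generation 7 on the
 * first attempt; the last component is bumped and the lookup retried until
 * the name collides with nothing in either the send root or the parent root.
 */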
1365 static int gen_unique_name(struct send_ctx
*sctx
,
1367 struct fs_path
*dest
)
1370 struct btrfs_path
*path
;
1371 struct btrfs_dir_item
*di
;
1376 path
= alloc_path_for_send();
1381 len
= snprintf(tmp
, sizeof(tmp
), "o%llu-%llu-%llu",
1383 ASSERT(len
< sizeof(tmp
));
1385 di
= btrfs_lookup_dir_item(NULL
, sctx
->send_root
,
1386 path
, BTRFS_FIRST_FREE_OBJECTID
,
1387 tmp
, strlen(tmp
), 0);
1388 btrfs_release_path(path
);
1394 /* not unique, try again */
1399 if (!sctx
->parent_root
) {
1405 di
= btrfs_lookup_dir_item(NULL
, sctx
->parent_root
,
1406 path
, BTRFS_FIRST_FREE_OBJECTID
,
1407 tmp
, strlen(tmp
), 0);
1408 btrfs_release_path(path
);
1414 /* not unique, try again */
1422 ret
= fs_path_add(dest
, tmp
, strlen(tmp
));
1425 btrfs_free_path(path
);
1430 inode_state_no_change
,
1431 inode_state_will_create
,
1432 inode_state_did_create
,
1433 inode_state_will_delete
,
1434 inode_state_did_delete
,
1437 static int get_cur_inode_state(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1445 ret
= get_inode_info(sctx
->send_root
, ino
, NULL
, &left_gen
, NULL
, NULL
,
1447 if (ret
< 0 && ret
!= -ENOENT
)
1451 if (!sctx
->parent_root
) {
1452 right_ret
= -ENOENT
;
1454 ret
= get_inode_info(sctx
->parent_root
, ino
, NULL
, &right_gen
,
1455 NULL
, NULL
, NULL
, NULL
);
1456 if (ret
< 0 && ret
!= -ENOENT
)
1461 if (!left_ret
&& !right_ret
) {
1462 if (left_gen
== gen
&& right_gen
== gen
) {
1463 ret
= inode_state_no_change
;
1464 } else if (left_gen
== gen
) {
1465 if (ino
< sctx
->send_progress
)
1466 ret
= inode_state_did_create
;
1468 ret
= inode_state_will_create
;
1469 } else if (right_gen
== gen
) {
1470 if (ino
< sctx
->send_progress
)
1471 ret
= inode_state_did_delete
;
1473 ret
= inode_state_will_delete
;
1477 } else if (!left_ret
) {
1478 if (left_gen
== gen
) {
1479 if (ino
< sctx
->send_progress
)
1480 ret
= inode_state_did_create
;
1482 ret
= inode_state_will_create
;
1486 } else if (!right_ret
) {
1487 if (right_gen
== gen
) {
1488 if (ino
< sctx
->send_progress
)
1489 ret
= inode_state_did_delete
;
1491 ret
= inode_state_will_delete
;
1503 static int is_inode_existent(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1507 ret
= get_cur_inode_state(sctx
, ino
, gen
);
1511 if (ret
== inode_state_no_change
||
1512 ret
== inode_state_did_create
||
1513 ret
== inode_state_will_delete
)
 * Helper function to look up a dir item in a dir.
1525 static int lookup_dir_item_inode(struct btrfs_root
*root
,
1526 u64 dir
, const char *name
, int name_len
,
1531 struct btrfs_dir_item
*di
;
1532 struct btrfs_key key
;
1533 struct btrfs_path
*path
;
1535 path
= alloc_path_for_send();
1539 di
= btrfs_lookup_dir_item(NULL
, root
, path
,
1540 dir
, name
, name_len
, 0);
1549 btrfs_dir_item_key_to_cpu(path
->nodes
[0], di
, &key
);
1550 *found_inode
= key
.objectid
;
1551 *found_type
= btrfs_dir_type(path
->nodes
[0], di
);
1554 btrfs_free_path(path
);
1559 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir,
1560 * generation of the parent dir and the name of the dir entry.
1562 static int get_first_ref(struct btrfs_root
*root
, u64 ino
,
1563 u64
*dir
, u64
*dir_gen
, struct fs_path
*name
)
1566 struct btrfs_key key
;
1567 struct btrfs_key found_key
;
1568 struct btrfs_path
*path
;
1572 path
= alloc_path_for_send();
1577 key
.type
= BTRFS_INODE_REF_KEY
;
1580 ret
= btrfs_search_slot_for_read(root
, &key
, path
, 1, 0);
1584 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
1586 if (ret
|| found_key
.objectid
!= ino
||
1587 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
1588 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
)) {
1593 if (key
.type
== BTRFS_INODE_REF_KEY
) {
1594 struct btrfs_inode_ref
*iref
;
1595 iref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1596 struct btrfs_inode_ref
);
1597 len
= btrfs_inode_ref_name_len(path
->nodes
[0], iref
);
1598 ret
= fs_path_add_from_extent_buffer(name
, path
->nodes
[0],
1599 (unsigned long)(iref
+ 1),
1601 parent_dir
= found_key
.offset
;
1603 struct btrfs_inode_extref
*extref
;
1604 extref
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
1605 struct btrfs_inode_extref
);
1606 len
= btrfs_inode_extref_name_len(path
->nodes
[0], extref
);
1607 ret
= fs_path_add_from_extent_buffer(name
, path
->nodes
[0],
1608 (unsigned long)&extref
->name
, len
);
1609 parent_dir
= btrfs_inode_extref_parent(path
->nodes
[0], extref
);
1613 btrfs_release_path(path
);
1615 ret
= get_inode_info(root
, parent_dir
, NULL
, dir_gen
, NULL
, NULL
,
1623 btrfs_free_path(path
);
1627 static int is_first_ref(struct btrfs_root
*root
,
1629 const char *name
, int name_len
)
1632 struct fs_path
*tmp_name
;
1636 tmp_name
= fs_path_alloc();
1640 ret
= get_first_ref(root
, ino
, &tmp_dir
, &tmp_dir_gen
, tmp_name
);
1644 if (dir
!= tmp_dir
|| name_len
!= fs_path_len(tmp_name
)) {
1649 ret
= !memcmp(tmp_name
->start
, name
, name_len
);
1652 fs_path_free(tmp_name
);
1657 * Used by process_recorded_refs to determine if a new ref would overwrite an
1658 * already existing ref. In case it detects an overwrite, it returns the
1659 * inode/gen in who_ino/who_gen.
1660 * When an overwrite is detected, process_recorded_refs does proper orphanizing
1661 * to make sure later references to the overwritten inode are possible.
1662 * Orphanizing is however only required for the first ref of an inode.
1663 * process_recorded_refs does an additional is_first_ref check to see if
1664 * orphanizing is really required.
1666 static int will_overwrite_ref(struct send_ctx
*sctx
, u64 dir
, u64 dir_gen
,
1667 const char *name
, int name_len
,
1668 u64
*who_ino
, u64
*who_gen
)
1672 u64 other_inode
= 0;
1675 if (!sctx
->parent_root
)
1678 ret
= is_inode_existent(sctx
, dir
, dir_gen
);
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created; if it was, then we have no overwrite
	 * and we can just unlink this entry.
1687 if (sctx
->parent_root
) {
1688 ret
= get_inode_info(sctx
->parent_root
, dir
, NULL
, &gen
, NULL
,
1690 if (ret
< 0 && ret
!= -ENOENT
)
1700 ret
= lookup_dir_item_inode(sctx
->parent_root
, dir
, name
, name_len
,
1701 &other_inode
, &other_type
);
1702 if (ret
< 0 && ret
!= -ENOENT
)
1710 * Check if the overwritten ref was already processed. If yes, the ref
1711 * was already unlinked/moved, so we can safely assume that we will not
1712 * overwrite anything at this point in time.
1714 if (other_inode
> sctx
->send_progress
) {
1715 ret
= get_inode_info(sctx
->parent_root
, other_inode
, NULL
,
1716 who_gen
, NULL
, NULL
, NULL
, NULL
);
1721 *who_ino
= other_inode
;
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
1737 static int did_overwrite_ref(struct send_ctx
*sctx
,
1738 u64 dir
, u64 dir_gen
,
1739 u64 ino
, u64 ino_gen
,
1740 const char *name
, int name_len
)
1747 if (!sctx
->parent_root
)
1750 ret
= is_inode_existent(sctx
, dir
, dir_gen
);
1754 /* check if the ref was overwritten by another ref */
1755 ret
= lookup_dir_item_inode(sctx
->send_root
, dir
, name
, name_len
,
1756 &ow_inode
, &other_type
);
1757 if (ret
< 0 && ret
!= -ENOENT
)
1760 /* was never and will never be overwritten */
1765 ret
= get_inode_info(sctx
->send_root
, ow_inode
, NULL
, &gen
, NULL
, NULL
,
1770 if (ow_inode
== ino
&& gen
== ino_gen
) {
1775 /* we know that it is or will be overwritten. check this now */
1776 if (ow_inode
< sctx
->send_progress
)
1786 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode
1787 * that got overwritten. This is used by process_recorded_refs to determine
1788 * if it has to use the path as returned by get_cur_path or the orphan name.
1790 static int did_overwrite_first_ref(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
1793 struct fs_path
*name
= NULL
;
1797 if (!sctx
->parent_root
)
1800 name
= fs_path_alloc();
1804 ret
= get_first_ref(sctx
->parent_root
, ino
, &dir
, &dir_gen
, name
);
1808 ret
= did_overwrite_ref(sctx
, dir
, dir_gen
, ino
, gen
,
1809 name
->start
, fs_path_len(name
));
1817 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
1818 * so we need to do some special handling in case we have clashes. This function
1819 * takes care of this with the help of name_cache_entry::radix_list.
1820 * In case of error, nce is kfreed.
1822 static int name_cache_insert(struct send_ctx
*sctx
,
1823 struct name_cache_entry
*nce
)
1826 struct list_head
*nce_head
;
1828 nce_head
= radix_tree_lookup(&sctx
->name_cache
,
1829 (unsigned long)nce
->ino
);
1831 nce_head
= kmalloc(sizeof(*nce_head
), GFP_NOFS
);
1836 INIT_LIST_HEAD(nce_head
);
1838 ret
= radix_tree_insert(&sctx
->name_cache
, nce
->ino
, nce_head
);
1845 list_add_tail(&nce
->radix_list
, nce_head
);
1846 list_add_tail(&nce
->list
, &sctx
->name_cache_list
);
1847 sctx
->name_cache_size
++;
1852 static void name_cache_delete(struct send_ctx
*sctx
,
1853 struct name_cache_entry
*nce
)
1855 struct list_head
*nce_head
;
1857 nce_head
= radix_tree_lookup(&sctx
->name_cache
,
1858 (unsigned long)nce
->ino
);
1860 btrfs_err(sctx
->send_root
->fs_info
,
1861 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
1862 nce
->ino
, sctx
->name_cache_size
);
1865 list_del(&nce
->radix_list
);
1866 list_del(&nce
->list
);
1867 sctx
->name_cache_size
--;
1870 * We may not get to the final release of nce_head if the lookup fails
1872 if (nce_head
&& list_empty(nce_head
)) {
1873 radix_tree_delete(&sctx
->name_cache
, (unsigned long)nce
->ino
);
1878 static struct name_cache_entry
*name_cache_search(struct send_ctx
*sctx
,
1881 struct list_head
*nce_head
;
1882 struct name_cache_entry
*cur
;
1884 nce_head
= radix_tree_lookup(&sctx
->name_cache
, (unsigned long)ino
);
1888 list_for_each_entry(cur
, nce_head
, radix_list
) {
1889 if (cur
->ino
== ino
&& cur
->gen
== gen
)
1896 * Removes the entry from the list and adds it back to the end. This marks the
1897 * entry as recently used so that name_cache_clean_unused does not remove it.
1899 static void name_cache_used(struct send_ctx
*sctx
, struct name_cache_entry
*nce
)
1901 list_del(&nce
->list
);
1902 list_add_tail(&nce
->list
, &sctx
->name_cache_list
);
1906 * Remove some entries from the beginning of name_cache_list.
1908 static void name_cache_clean_unused(struct send_ctx
*sctx
)
1910 struct name_cache_entry
*nce
;
1912 if (sctx
->name_cache_size
< SEND_CTX_NAME_CACHE_CLEAN_SIZE
)
1915 while (sctx
->name_cache_size
> SEND_CTX_MAX_NAME_CACHE_SIZE
) {
1916 nce
= list_entry(sctx
->name_cache_list
.next
,
1917 struct name_cache_entry
, list
);
1918 name_cache_delete(sctx
, nce
);
1923 static void name_cache_free(struct send_ctx
*sctx
)
1925 struct name_cache_entry
*nce
;
1927 while (!list_empty(&sctx
->name_cache_list
)) {
1928 nce
= list_entry(sctx
->name_cache_list
.next
,
1929 struct name_cache_entry
, list
);
1930 name_cache_delete(sctx
, nce
);
1936 * Used by get_cur_path for each ref up to the root.
1937 * Returns 0 if it succeeded.
 * Returns 1 if the inode does not exist or got overwritten. In that case, the
1939 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
1940 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
1941 * Returns <0 in case of error.
1943 static int __get_cur_name_and_parent(struct send_ctx
*sctx
,
1945 int skip_name_cache
,
1948 struct fs_path
*dest
)
1952 struct btrfs_path
*path
= NULL
;
1953 struct name_cache_entry
*nce
= NULL
;
1955 if (skip_name_cache
)
1958 * First check if we already did a call to this function with the same
1959 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
1960 * return the cached result.
1962 nce
= name_cache_search(sctx
, ino
, gen
);
1964 if (ino
< sctx
->send_progress
&& nce
->need_later_update
) {
1965 name_cache_delete(sctx
, nce
);
1969 name_cache_used(sctx
, nce
);
1970 *parent_ino
= nce
->parent_ino
;
1971 *parent_gen
= nce
->parent_gen
;
1972 ret
= fs_path_add(dest
, nce
->name
, nce
->name_len
);
1980 path
= alloc_path_for_send();
	 * If the inode does not exist yet, add the orphan name and return 1.
1986 * This should only happen for the parent dir that we determine in
1989 ret
= is_inode_existent(sctx
, ino
, gen
);
1994 ret
= gen_unique_name(sctx
, ino
, gen
, dest
);
2003 * Depending on whether the inode was already processed or not, use
2004 * send_root or parent_root for ref lookup.
2006 if (ino
< sctx
->send_progress
&& !skip_name_cache
)
2007 ret
= get_first_ref(sctx
->send_root
, ino
,
2008 parent_ino
, parent_gen
, dest
);
2010 ret
= get_first_ref(sctx
->parent_root
, ino
,
2011 parent_ino
, parent_gen
, dest
);
2016 * Check if the ref was overwritten by an inode's ref that was processed
2017 * earlier. If yes, treat as orphan and return 1.
2019 ret
= did_overwrite_ref(sctx
, *parent_ino
, *parent_gen
, ino
, gen
,
2020 dest
->start
, dest
->end
- dest
->start
);
2024 fs_path_reset(dest
);
2025 ret
= gen_unique_name(sctx
, ino
, gen
, dest
);
2030 if (skip_name_cache
)
2035 * Store the result of the lookup in the name cache.
2037 nce
= kmalloc(sizeof(*nce
) + fs_path_len(dest
) + 1, GFP_NOFS
);
2045 nce
->parent_ino
= *parent_ino
;
2046 nce
->parent_gen
= *parent_gen
;
2047 nce
->name_len
= fs_path_len(dest
);
2049 strcpy(nce
->name
, dest
->start
);
2051 if (ino
< sctx
->send_progress
)
2052 nce
->need_later_update
= 0;
2054 nce
->need_later_update
= 1;
2056 nce_ret
= name_cache_insert(sctx
, nce
);
2059 name_cache_clean_unused(sctx
);
2062 btrfs_free_path(path
);
/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inode's "orphan" name instead of the real name and stop. Same with new inodes
 * that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    the orphanized inode.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
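
/*
 * Illustrative sketch, not part of the original code: conceptually the loop
 * below builds the path right-to-left while climbing towards the subvolume
 * root,
 *
 *	fs_path_reset(dest);			// dest grows to the left
 *	while (ino != BTRFS_FIRST_FREE_OBJECTID) {
 *		// __get_cur_name_and_parent() yields the name of ino and its
 *		// parent; already-processed inodes resolve via send_root,
 *		// unprocessed ones via parent_root
 *		fs_path_add_path(dest, name);	// prepended, dest is reversed
 *		ino = parent_inode;
 *	}
 *	fs_path_unreverse(dest);
 *
 * Deleted, not-yet-created or overwritten inodes short-circuit the walk and
 * resolve to their orphan name instead.
 */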
2091 static int get_cur_path(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2092 struct fs_path
*dest
)
2095 struct fs_path
*name
= NULL
;
2096 u64 parent_inode
= 0;
2099 int skip_name_cache
= 0;
2101 name
= fs_path_alloc();
2107 if (is_waiting_for_move(sctx
, ino
))
2108 skip_name_cache
= 1;
2111 fs_path_reset(dest
);
2113 while (!stop
&& ino
!= BTRFS_FIRST_FREE_OBJECTID
) {
2114 fs_path_reset(name
);
2116 ret
= __get_cur_name_and_parent(sctx
, ino
, gen
, skip_name_cache
,
2117 &parent_inode
, &parent_gen
, name
);
2123 if (!skip_name_cache
&&
2124 is_waiting_for_move(sctx
, parent_inode
))
2125 skip_name_cache
= 1;
2127 ret
= fs_path_add_path(dest
, name
);
2138 fs_path_unreverse(dest
);
2143 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace
2145 static int send_subvol_begin(struct send_ctx
*sctx
)
2148 struct btrfs_root
*send_root
= sctx
->send_root
;
2149 struct btrfs_root
*parent_root
= sctx
->parent_root
;
2150 struct btrfs_path
*path
;
2151 struct btrfs_key key
;
2152 struct btrfs_root_ref
*ref
;
2153 struct extent_buffer
*leaf
;
2157 path
= btrfs_alloc_path();
2161 name
= kmalloc(BTRFS_PATH_NAME_MAX
, GFP_NOFS
);
2163 btrfs_free_path(path
);
2167 key
.objectid
= send_root
->objectid
;
2168 key
.type
= BTRFS_ROOT_BACKREF_KEY
;
2171 ret
= btrfs_search_slot_for_read(send_root
->fs_info
->tree_root
,
2180 leaf
= path
->nodes
[0];
2181 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
2182 if (key
.type
!= BTRFS_ROOT_BACKREF_KEY
||
2183 key
.objectid
!= send_root
->objectid
) {
2187 ref
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_root_ref
);
2188 namelen
= btrfs_root_ref_name_len(leaf
, ref
);
2189 read_extent_buffer(leaf
, name
, (unsigned long)(ref
+ 1), namelen
);
2190 btrfs_release_path(path
);
2193 ret
= begin_cmd(sctx
, BTRFS_SEND_C_SNAPSHOT
);
2197 ret
= begin_cmd(sctx
, BTRFS_SEND_C_SUBVOL
);
2202 TLV_PUT_STRING(sctx
, BTRFS_SEND_A_PATH
, name
, namelen
);
2203 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_UUID
,
2204 sctx
->send_root
->root_item
.uuid
);
2205 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CTRANSID
,
2206 le64_to_cpu(sctx
->send_root
->root_item
.ctransid
));
2208 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_CLONE_UUID
,
2209 sctx
->parent_root
->root_item
.uuid
);
2210 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_CTRANSID
,
2211 le64_to_cpu(sctx
->parent_root
->root_item
.ctransid
));
2214 ret
= send_cmd(sctx
);
2218 btrfs_free_path(path
);
2223 static int send_truncate(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 size
)
2228 verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino
, size
);
2230 p
= fs_path_alloc();
2234 ret
= begin_cmd(sctx
, BTRFS_SEND_C_TRUNCATE
);
2238 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2241 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2242 TLV_PUT_U64(sctx
, BTRFS_SEND_A_SIZE
, size
);
2244 ret
= send_cmd(sctx
);
2252 static int send_chmod(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 mode
)
2257 verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino
, mode
);
2259 p
= fs_path_alloc();
2263 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CHMOD
);
2267 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2270 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2271 TLV_PUT_U64(sctx
, BTRFS_SEND_A_MODE
, mode
& 07777);
2273 ret
= send_cmd(sctx
);
2281 static int send_chown(struct send_ctx
*sctx
, u64 ino
, u64 gen
, u64 uid
, u64 gid
)
2286 verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino
, uid
, gid
);
2288 p
= fs_path_alloc();
2292 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CHOWN
);
2296 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2299 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2300 TLV_PUT_U64(sctx
, BTRFS_SEND_A_UID
, uid
);
2301 TLV_PUT_U64(sctx
, BTRFS_SEND_A_GID
, gid
);
2303 ret
= send_cmd(sctx
);
2311 static int send_utimes(struct send_ctx
*sctx
, u64 ino
, u64 gen
)
2314 struct fs_path
*p
= NULL
;
2315 struct btrfs_inode_item
*ii
;
2316 struct btrfs_path
*path
= NULL
;
2317 struct extent_buffer
*eb
;
2318 struct btrfs_key key
;
2321 verbose_printk("btrfs: send_utimes %llu\n", ino
);
2323 p
= fs_path_alloc();
2327 path
= alloc_path_for_send();
2334 key
.type
= BTRFS_INODE_ITEM_KEY
;
2336 ret
= btrfs_search_slot(NULL
, sctx
->send_root
, &key
, path
, 0, 0);
2340 eb
= path
->nodes
[0];
2341 slot
= path
->slots
[0];
2342 ii
= btrfs_item_ptr(eb
, slot
, struct btrfs_inode_item
);
2344 ret
= begin_cmd(sctx
, BTRFS_SEND_C_UTIMES
);
2348 ret
= get_cur_path(sctx
, ino
, gen
, p
);
2351 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2352 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_ATIME
, eb
,
2353 btrfs_inode_atime(ii
));
2354 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_MTIME
, eb
,
2355 btrfs_inode_mtime(ii
));
2356 TLV_PUT_BTRFS_TIMESPEC(sctx
, BTRFS_SEND_A_CTIME
, eb
,
2357 btrfs_inode_ctime(ii
));
2358 /* TODO Add otime support when the otime patches get into upstream */
2360 ret
= send_cmd(sctx
);
2365 btrfs_free_path(path
);
2370 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have
2371 * a valid path yet because we did not process the refs yet. So, the inode
2372 * is created as orphan.
2374 static int send_create_inode(struct send_ctx
*sctx
, u64 ino
)
2383 verbose_printk("btrfs: send_create_inode %llu\n", ino
);
2385 p
= fs_path_alloc();
2389 ret
= get_inode_info(sctx
->send_root
, ino
, NULL
, &gen
, &mode
, NULL
,
2394 if (S_ISREG(mode
)) {
2395 cmd
= BTRFS_SEND_C_MKFILE
;
2396 } else if (S_ISDIR(mode
)) {
2397 cmd
= BTRFS_SEND_C_MKDIR
;
2398 } else if (S_ISLNK(mode
)) {
2399 cmd
= BTRFS_SEND_C_SYMLINK
;
2400 } else if (S_ISCHR(mode
) || S_ISBLK(mode
)) {
2401 cmd
= BTRFS_SEND_C_MKNOD
;
2402 } else if (S_ISFIFO(mode
)) {
2403 cmd
= BTRFS_SEND_C_MKFIFO
;
2404 } else if (S_ISSOCK(mode
)) {
2405 cmd
= BTRFS_SEND_C_MKSOCK
;
2407 printk(KERN_WARNING
"btrfs: unexpected inode type %o",
2408 (int)(mode
& S_IFMT
));
2413 ret
= begin_cmd(sctx
, cmd
);
2417 ret
= gen_unique_name(sctx
, ino
, gen
, p
);
2421 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
2422 TLV_PUT_U64(sctx
, BTRFS_SEND_A_INO
, ino
);
2424 if (S_ISLNK(mode
)) {
2426 ret
= read_symlink(sctx
->send_root
, ino
, p
);
2429 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH_LINK
, p
);
2430 } else if (S_ISCHR(mode
) || S_ISBLK(mode
) ||
2431 S_ISFIFO(mode
) || S_ISSOCK(mode
)) {
2432 TLV_PUT_U64(sctx
, BTRFS_SEND_A_RDEV
, new_encode_dev(rdev
));
2433 TLV_PUT_U64(sctx
, BTRFS_SEND_A_MODE
, mode
);
2436 ret
= send_cmd(sctx
);
2448 * We need some special handling for inodes that get processed before the parent
2449 * directory got created. See process_recorded_refs for details.
2450 * This function does the check if we already created the dir out of order.
2452 static int did_create_dir(struct send_ctx
*sctx
, u64 dir
)
2455 struct btrfs_path
*path
= NULL
;
2456 struct btrfs_key key
;
2457 struct btrfs_key found_key
;
2458 struct btrfs_key di_key
;
2459 struct extent_buffer
*eb
;
2460 struct btrfs_dir_item
*di
;
2463 path
= alloc_path_for_send();
2470 key
.type
= BTRFS_DIR_INDEX_KEY
;
2472 ret
= btrfs_search_slot(NULL
, sctx
->send_root
, &key
, path
, 0, 0);
2477 eb
= path
->nodes
[0];
2478 slot
= path
->slots
[0];
2479 if (slot
>= btrfs_header_nritems(eb
)) {
2480 ret
= btrfs_next_leaf(sctx
->send_root
, path
);
2483 } else if (ret
> 0) {
2490 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
2491 if (found_key
.objectid
!= key
.objectid
||
2492 found_key
.type
!= key
.type
) {
2497 di
= btrfs_item_ptr(eb
, slot
, struct btrfs_dir_item
);
2498 btrfs_dir_item_key_to_cpu(eb
, di
, &di_key
);
2500 if (di_key
.type
!= BTRFS_ROOT_ITEM_KEY
&&
2501 di_key
.objectid
< sctx
->send_progress
) {
2510 btrfs_free_path(path
);
2515 * Only creates the inode if it is:
2516 * 1. Not a directory
2517 * 2. Or a directory which was not created already due to out of order
2518 * directories. See did_create_dir and process_recorded_refs for details.
2520 static int send_create_inode_if_needed(struct send_ctx
*sctx
)
2524 if (S_ISDIR(sctx
->cur_inode_mode
)) {
2525 ret
= did_create_dir(sctx
, sctx
->cur_ino
);
2534 ret
= send_create_inode(sctx
, sctx
->cur_ino
);
2542 struct recorded_ref
{
2543 struct list_head list
;
2546 struct fs_path
*full_path
;
2554 * We need to process new refs before deleted refs, but compare_tree gives us
2555 * everything mixed. So we first record all refs and later process them.
2556 * This function is a helper to record one ref.
2558 static int record_ref(struct list_head
*head
, u64 dir
,
2559 u64 dir_gen
, struct fs_path
*path
)
2561 struct recorded_ref
*ref
;
2563 ref
= kmalloc(sizeof(*ref
), GFP_NOFS
);
2568 ref
->dir_gen
= dir_gen
;
2569 ref
->full_path
= path
;
2571 ref
->name
= (char *)kbasename(ref
->full_path
->start
);
2572 ref
->name_len
= ref
->full_path
->end
- ref
->name
;
2573 ref
->dir_path
= ref
->full_path
->start
;
2574 if (ref
->name
== ref
->full_path
->start
)
2575 ref
->dir_path_len
= 0;
2577 ref
->dir_path_len
= ref
->full_path
->end
-
2578 ref
->full_path
->start
- 1 - ref
->name_len
;
2580 list_add_tail(&ref
->list
, head
);
2584 static int dup_ref(struct recorded_ref
*ref
, struct list_head
*list
)
2586 struct recorded_ref
*new;
2588 new = kmalloc(sizeof(*ref
), GFP_NOFS
);
2592 new->dir
= ref
->dir
;
2593 new->dir_gen
= ref
->dir_gen
;
2594 new->full_path
= NULL
;
2595 INIT_LIST_HEAD(&new->list
);
2596 list_add_tail(&new->list
, list
);
2600 static void __free_recorded_refs(struct list_head
*head
)
2602 struct recorded_ref
*cur
;
2604 while (!list_empty(head
)) {
2605 cur
= list_entry(head
->next
, struct recorded_ref
, list
);
2606 fs_path_free(cur
->full_path
);
2607 list_del(&cur
->list
);
2612 static void free_recorded_refs(struct send_ctx
*sctx
)
2614 __free_recorded_refs(&sctx
->new_refs
);
2615 __free_recorded_refs(&sctx
->deleted_refs
);
 * Renames/moves a file/dir to its orphan name. Used when the first ref of an
 * unprocessed inode gets overwritten and for all non-empty directories.
2623 static int orphanize_inode(struct send_ctx
*sctx
, u64 ino
, u64 gen
,
2624 struct fs_path
*path
)
2627 struct fs_path
*orphan
;
2629 orphan
= fs_path_alloc();
2633 ret
= gen_unique_name(sctx
, ino
, gen
, orphan
);
2637 ret
= send_rename(sctx
, path
, orphan
);
2640 fs_path_free(orphan
);
2645 * Returns 1 if a directory can be removed at this point in time.
2646 * We check this by iterating all dir items and checking if the inode behind
2647 * the dir item was already processed.
2649 static int can_rmdir(struct send_ctx
*sctx
, u64 dir
, u64 send_progress
)
2652 struct btrfs_root
*root
= sctx
->parent_root
;
2653 struct btrfs_path
*path
;
2654 struct btrfs_key key
;
2655 struct btrfs_key found_key
;
2656 struct btrfs_key loc
;
2657 struct btrfs_dir_item
*di
;
2660 * Don't try to rmdir the top/root subvolume dir.
2662 if (dir
== BTRFS_FIRST_FREE_OBJECTID
)
2665 path
= alloc_path_for_send();
2670 key
.type
= BTRFS_DIR_INDEX_KEY
;
2672 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
2677 if (path
->slots
[0] >= btrfs_header_nritems(path
->nodes
[0])) {
2678 ret
= btrfs_next_leaf(root
, path
);
2685 btrfs_item_key_to_cpu(path
->nodes
[0], &found_key
,
2687 if (found_key
.objectid
!= key
.objectid
||
2688 found_key
.type
!= key
.type
)
2691 di
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
2692 struct btrfs_dir_item
);
2693 btrfs_dir_item_key_to_cpu(path
->nodes
[0], di
, &loc
);
2695 if (loc
.objectid
> send_progress
) {
2706 btrfs_free_path(path
);
2710 static int is_waiting_for_move(struct send_ctx
*sctx
, u64 ino
)
2712 struct rb_node
*n
= sctx
->waiting_dir_moves
.rb_node
;
2713 struct waiting_dir_move
*entry
;
2716 entry
= rb_entry(n
, struct waiting_dir_move
, node
);
2717 if (ino
< entry
->ino
)
2719 else if (ino
> entry
->ino
)
2727 static int add_waiting_dir_move(struct send_ctx
*sctx
, u64 ino
)
2729 struct rb_node
**p
= &sctx
->waiting_dir_moves
.rb_node
;
2730 struct rb_node
*parent
= NULL
;
2731 struct waiting_dir_move
*entry
, *dm
;
2733 dm
= kmalloc(sizeof(*dm
), GFP_NOFS
);
2740 entry
= rb_entry(parent
, struct waiting_dir_move
, node
);
2741 if (ino
< entry
->ino
) {
2743 } else if (ino
> entry
->ino
) {
2744 p
= &(*p
)->rb_right
;
2751 rb_link_node(&dm
->node
, parent
, p
);
2752 rb_insert_color(&dm
->node
, &sctx
->waiting_dir_moves
);
2756 static int del_waiting_dir_move(struct send_ctx
*sctx
, u64 ino
)
2758 struct rb_node
*n
= sctx
->waiting_dir_moves
.rb_node
;
2759 struct waiting_dir_move
*entry
;
2762 entry
= rb_entry(n
, struct waiting_dir_move
, node
);
2763 if (ino
< entry
->ino
) {
2765 } else if (ino
> entry
->ino
) {
2768 rb_erase(&entry
->node
, &sctx
->waiting_dir_moves
);
2776 static int add_pending_dir_move(struct send_ctx
*sctx
, u64 parent_ino
)
2778 struct rb_node
**p
= &sctx
->pending_dir_moves
.rb_node
;
2779 struct rb_node
*parent
= NULL
;
2780 struct pending_dir_move
*entry
, *pm
;
2781 struct recorded_ref
*cur
;
2785 pm
= kmalloc(sizeof(*pm
), GFP_NOFS
);
2788 pm
->parent_ino
= parent_ino
;
2789 pm
->ino
= sctx
->cur_ino
;
2790 pm
->gen
= sctx
->cur_inode_gen
;
2791 INIT_LIST_HEAD(&pm
->list
);
2792 INIT_LIST_HEAD(&pm
->update_refs
);
2793 RB_CLEAR_NODE(&pm
->node
);
2797 entry
= rb_entry(parent
, struct pending_dir_move
, node
);
2798 if (parent_ino
< entry
->parent_ino
) {
2800 } else if (parent_ino
> entry
->parent_ino
) {
2801 p
= &(*p
)->rb_right
;
2808 list_for_each_entry(cur
, &sctx
->deleted_refs
, list
) {
2809 ret
= dup_ref(cur
, &pm
->update_refs
);
2813 list_for_each_entry(cur
, &sctx
->new_refs
, list
) {
2814 ret
= dup_ref(cur
, &pm
->update_refs
);
2819 ret
= add_waiting_dir_move(sctx
, pm
->ino
);
2824 list_add_tail(&pm
->list
, &entry
->list
);
2826 rb_link_node(&pm
->node
, parent
, p
);
2827 rb_insert_color(&pm
->node
, &sctx
->pending_dir_moves
);
2832 __free_recorded_refs(&pm
->update_refs
);
2838 static struct pending_dir_move
*get_pending_dir_moves(struct send_ctx
*sctx
,
2841 struct rb_node
*n
= sctx
->pending_dir_moves
.rb_node
;
2842 struct pending_dir_move
*entry
;
2845 entry
= rb_entry(n
, struct pending_dir_move
, node
);
2846 if (parent_ino
< entry
->parent_ino
)
2848 else if (parent_ino
> entry
->parent_ino
)
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
	struct fs_path *from_path = NULL;
	struct fs_path *to_path = NULL;
	struct fs_path *name = NULL;
	u64 orig_progress = sctx->send_progress;
	struct recorded_ref *cur;
	u64 parent_ino, parent_gen;
	int ret;

	name = fs_path_alloc();
	from_path = fs_path_alloc();
	if (!name || !from_path) {
		ret = -ENOMEM;
		goto out;
	}

	ret = del_waiting_dir_move(sctx, pm->ino);
	ASSERT(ret == 0);

	ret = get_first_ref(sctx->parent_root, pm->ino,
			    &parent_ino, &parent_gen, name);
	if (ret < 0)
		goto out;

	if (parent_ino == sctx->cur_ino) {
		/* child only renamed, not moved */
		ASSERT(parent_gen == sctx->cur_inode_gen);
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				   from_path);
		if (ret < 0)
			goto out;
		ret = fs_path_add_path(from_path, name);
		if (ret < 0)
			goto out;
	} else {
		/* child moved and maybe renamed too */
		sctx->send_progress = pm->ino;
		ret = get_cur_path(sctx, pm->ino, pm->gen, from_path);
		if (ret < 0)
			goto out;
	}

	to_path = fs_path_alloc();
	if (!to_path) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->send_progress = sctx->cur_ino + 1;
	ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
	if (ret < 0)
		goto out;

	ret = send_rename(sctx, from_path, to_path);
	if (ret < 0)
		goto out;

	ret = send_utimes(sctx, pm->ino, pm->gen);
	if (ret < 0)
		goto out;

	/*
	 * After rename/move, need to update the utimes of both new parent(s)
	 * and old parent(s).
	 */
	list_for_each_entry(cur, &pm->update_refs, list) {
		ret = send_utimes(sctx, cur->dir, cur->dir_gen);
		if (ret < 0)
			goto out;
	}

out:
	fs_path_free(name);
	fs_path_free(from_path);
	fs_path_free(to_path);
	sctx->send_progress = orig_progress;

	return ret;
}
static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m)
{
	if (!list_empty(&m->list))
		list_del(&m->list);
	if (!RB_EMPTY_NODE(&m->node))
		rb_erase(&m->node, &sctx->pending_dir_moves);
	__free_recorded_refs(&m->update_refs);
	kfree(m);
}

static void tail_append_pending_moves(struct pending_dir_move *moves,
				      struct list_head *stack)
{
	if (list_empty(&moves->list)) {
		list_add_tail(&moves->list, stack);
	} else {
		LIST_HEAD(list);
		list_splice_init(&moves->list, &list);
		list_add_tail(&moves->list, stack);
		list_splice_tail(&list, stack);
	}
}
static int apply_children_dir_moves(struct send_ctx *sctx)
{
	struct pending_dir_move *pm;
	struct list_head stack;
	u64 parent_ino = sctx->cur_ino;
	int ret = 0;

	pm = get_pending_dir_moves(sctx, parent_ino);
	if (!pm)
		return 0;

	INIT_LIST_HEAD(&stack);
	tail_append_pending_moves(pm, &stack);

	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		parent_ino = pm->ino;
		ret = apply_dir_move(sctx, pm);
		free_pending_move(sctx, pm);
		if (ret)
			goto out;
		pm = get_pending_dir_moves(sctx, parent_ino);
		if (pm)
			tail_append_pending_moves(pm, &stack);
	}
	return 0;

out:
	while (!list_empty(&stack)) {
		pm = list_first_entry(&stack, struct pending_dir_move, list);
		free_pending_move(sctx, pm);
	}
	return ret;
}
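
/*
 * Illustrative example (not taken from an actual test case) of when a
 * directory move has to wait: in the parent snapshot /a/b (ino 259) contains
 * /a/b/c (ino 258); in the send snapshot both were renamed, giving /a/b2/c2.
 * Inode 258 is processed first, but its new path needs /a/b2, which only
 * exists once inode 259 has been processed, so the rename of 258 is recorded
 * via add_pending_dir_move() and replayed later from
 * apply_children_dir_moves().
 */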
static int wait_for_parent_move(struct send_ctx *sctx,
				struct recorded_ref *parent_ref)
{
	int ret;
	u64 ino = parent_ref->dir;
	u64 parent_ino_before, parent_ino_after;
	u64 new_gen, old_gen;
	struct fs_path *path_before = NULL;
	struct fs_path *path_after = NULL;
	int len1, len2;

	if (parent_ref->dir <= sctx->cur_ino)
		return 0;

	if (is_waiting_for_move(sctx, ino))
		return 1;

	ret = get_inode_info(sctx->parent_root, ino, NULL, &old_gen,
			     NULL, NULL, NULL, NULL);
	if (ret == -ENOENT)
		return 0;
	else if (ret < 0)
		return ret;

	ret = get_inode_info(sctx->send_root, ino, NULL, &new_gen,
			     NULL, NULL, NULL, NULL);
	if (ret < 0)
		return ret;

	if (new_gen != old_gen)
		return 0;

	path_before = fs_path_alloc();
	if (!path_before)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before,
			    NULL, path_before);
	if (ret == -ENOENT) {
		ret = 0;
		goto out;
	} else if (ret < 0) {
		goto out;
	}

	path_after = fs_path_alloc();
	if (!path_after) {
		ret = -ENOMEM;
		goto out;
	}

	ret = get_first_ref(sctx->send_root, ino, &parent_ino_after,
			    NULL, path_after);
	if (ret == -ENOENT) {
		ret = 0;
		goto out;
	} else if (ret < 0) {
		goto out;
	}

	len1 = fs_path_len(path_before);
	len2 = fs_path_len(path_after);
	if (parent_ino_before != parent_ino_after || len1 != len2 ||
	    memcmp(path_before->start, path_after->start, len1)) {
		ret = 1;
	} else {
		ret = 0;
	}

out:
	fs_path_free(path_before);
	fs_path_free(path_after);

	return ret;
}
3075 * This does all the move/link/unlink/rmdir magic.
3077 static int process_recorded_refs(struct send_ctx
*sctx
, int *pending_move
)
3080 struct recorded_ref
*cur
;
3081 struct recorded_ref
*cur2
;
3082 struct list_head check_dirs
;
3083 struct fs_path
*valid_path
= NULL
;
3086 int did_overwrite
= 0;
3089 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx
->cur_ino
);
3092 * This should never happen as the root dir always has the same ref
3093 * which is always '..'
3095 BUG_ON(sctx
->cur_ino
<= BTRFS_FIRST_FREE_OBJECTID
);
3096 INIT_LIST_HEAD(&check_dirs
);
3098 valid_path
= fs_path_alloc();
	/*
	 * First, check if the first ref of the current inode was overwritten
	 * before. If yes, we know that the current inode was already orphanized
	 * and thus use the orphan name. If not, we can use get_cur_path to
	 * get the path of the first ref as it would look like while receiving
	 * at this point in time.
	 * New inodes are always orphan at the beginning, so force to use the
	 * orphan name in this case.
	 * The first ref is stored in valid_path and will be updated if it
	 * gets moved around.
	 */
3115 if (!sctx
->cur_inode_new
) {
3116 ret
= did_overwrite_first_ref(sctx
, sctx
->cur_ino
,
3117 sctx
->cur_inode_gen
);
3123 if (sctx
->cur_inode_new
|| did_overwrite
) {
3124 ret
= gen_unique_name(sctx
, sctx
->cur_ino
,
3125 sctx
->cur_inode_gen
, valid_path
);
3130 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
3136 list_for_each_entry(cur
, &sctx
->new_refs
, list
) {
		/*
		 * We may have refs where the parent directory does not exist
		 * yet. This happens if the parent directory's inum is higher
		 * than the current inum. To handle this case, we create the
		 * parent directory out of order. But we need to check if this
		 * did already happen before due to other refs in the same dir.
		 */
3144 ret
= get_cur_inode_state(sctx
, cur
->dir
, cur
->dir_gen
);
3147 if (ret
== inode_state_will_create
) {
3150 * First check if any of the current inodes refs did
3151 * already create the dir.
3153 list_for_each_entry(cur2
, &sctx
->new_refs
, list
) {
3156 if (cur2
->dir
== cur
->dir
) {
3163 * If that did not happen, check if a previous inode
3164 * did already create the dir.
3167 ret
= did_create_dir(sctx
, cur
->dir
);
3171 ret
= send_create_inode(sctx
, cur
->dir
);
3178 * Check if this new ref would overwrite the first ref of
3179 * another unprocessed inode. If yes, orphanize the
3180 * overwritten inode. If we find an overwritten ref that is
3181 * not the first ref, simply unlink it.
3183 ret
= will_overwrite_ref(sctx
, cur
->dir
, cur
->dir_gen
,
3184 cur
->name
, cur
->name_len
,
3185 &ow_inode
, &ow_gen
);
3189 ret
= is_first_ref(sctx
->parent_root
,
3190 ow_inode
, cur
->dir
, cur
->name
,
3195 ret
= orphanize_inode(sctx
, ow_inode
, ow_gen
,
3200 ret
= send_unlink(sctx
, cur
->full_path
);
3207 * link/move the ref to the new place. If we have an orphan
3208 * inode, move it and update valid_path. If not, link or move
3209 * it depending on the inode mode.
3212 ret
= send_rename(sctx
, valid_path
, cur
->full_path
);
3216 ret
= fs_path_copy(valid_path
, cur
->full_path
);
3220 if (S_ISDIR(sctx
->cur_inode_mode
)) {
3222 * Dirs can't be linked, so move it. For moved
3223 * dirs, we always have one new and one deleted
3224 * ref. The deleted ref is ignored later.
3226 ret
= wait_for_parent_move(sctx
, cur
);
3230 ret
= add_pending_dir_move(sctx
,
3234 ret
= send_rename(sctx
, valid_path
,
3237 ret
= fs_path_copy(valid_path
,
3243 ret
= send_link(sctx
, cur
->full_path
,
3249 ret
= dup_ref(cur
, &check_dirs
);
3254 if (S_ISDIR(sctx
->cur_inode_mode
) && sctx
->cur_inode_deleted
) {
3256 * Check if we can already rmdir the directory. If not,
3257 * orphanize it. For every dir item inside that gets deleted
3258 * later, we do this check again and rmdir it then if possible.
3259 * See the use of check_dirs for more details.
3261 ret
= can_rmdir(sctx
, sctx
->cur_ino
, sctx
->cur_ino
);
3265 ret
= send_rmdir(sctx
, valid_path
);
3268 } else if (!is_orphan
) {
3269 ret
= orphanize_inode(sctx
, sctx
->cur_ino
,
3270 sctx
->cur_inode_gen
, valid_path
);
3276 list_for_each_entry(cur
, &sctx
->deleted_refs
, list
) {
3277 ret
= dup_ref(cur
, &check_dirs
);
3281 } else if (S_ISDIR(sctx
->cur_inode_mode
) &&
3282 !list_empty(&sctx
->deleted_refs
)) {
3284 * We have a moved dir. Add the old parent to check_dirs
3286 cur
= list_entry(sctx
->deleted_refs
.next
, struct recorded_ref
,
3288 ret
= dup_ref(cur
, &check_dirs
);
3291 } else if (!S_ISDIR(sctx
->cur_inode_mode
)) {
3293 * We have a non dir inode. Go through all deleted refs and
3294 * unlink them if they were not already overwritten by other
3297 list_for_each_entry(cur
, &sctx
->deleted_refs
, list
) {
3298 ret
= did_overwrite_ref(sctx
, cur
->dir
, cur
->dir_gen
,
3299 sctx
->cur_ino
, sctx
->cur_inode_gen
,
3300 cur
->name
, cur
->name_len
);
3304 ret
= send_unlink(sctx
, cur
->full_path
);
3308 ret
= dup_ref(cur
, &check_dirs
);
3313 * If the inode is still orphan, unlink the orphan. This may
3314 * happen when a previous inode did overwrite the first ref
3315 * of this inode and no new refs were added for the current
3316 * inode. Unlinking does not mean that the inode is deleted in
3317 * all cases. There may still be links to this inode in other
3321 ret
= send_unlink(sctx
, valid_path
);
3328 * We did collect all parent dirs where cur_inode was once located. We
3329 * now go through all these dirs and check if they are pending for
3330 * deletion and if it's finally possible to perform the rmdir now.
3331 * We also update the inode stats of the parent dirs here.
3333 list_for_each_entry(cur
, &check_dirs
, list
) {
3335 * In case we had refs into dirs that were not processed yet,
3336 * we don't need to do the utime and rmdir logic for these dirs.
3337 * The dir will be processed later.
3339 if (cur
->dir
> sctx
->cur_ino
)
3342 ret
= get_cur_inode_state(sctx
, cur
->dir
, cur
->dir_gen
);
3346 if (ret
== inode_state_did_create
||
3347 ret
== inode_state_no_change
) {
3348 /* TODO delayed utimes */
3349 ret
= send_utimes(sctx
, cur
->dir
, cur
->dir_gen
);
3352 } else if (ret
== inode_state_did_delete
) {
3353 ret
= can_rmdir(sctx
, cur
->dir
, sctx
->cur_ino
);
3357 ret
= get_cur_path(sctx
, cur
->dir
,
3358 cur
->dir_gen
, valid_path
);
3361 ret
= send_rmdir(sctx
, valid_path
);
3371 __free_recorded_refs(&check_dirs
);
3372 free_recorded_refs(sctx
);
3373 fs_path_free(valid_path
);
3377 static int __record_new_ref(int num
, u64 dir
, int index
,
3378 struct fs_path
*name
,
3382 struct send_ctx
*sctx
= ctx
;
3386 p
= fs_path_alloc();
3390 ret
= get_inode_info(sctx
->send_root
, dir
, NULL
, &gen
, NULL
, NULL
,
3395 ret
= get_cur_path(sctx
, dir
, gen
, p
);
3398 ret
= fs_path_add_path(p
, name
);
3402 ret
= record_ref(&sctx
->new_refs
, dir
, gen
, p
);
3410 static int __record_deleted_ref(int num
, u64 dir
, int index
,
3411 struct fs_path
*name
,
3415 struct send_ctx
*sctx
= ctx
;
3419 p
= fs_path_alloc();
3423 ret
= get_inode_info(sctx
->parent_root
, dir
, NULL
, &gen
, NULL
, NULL
,
3428 ret
= get_cur_path(sctx
, dir
, gen
, p
);
3431 ret
= fs_path_add_path(p
, name
);
3435 ret
= record_ref(&sctx
->deleted_refs
, dir
, gen
, p
);
3443 static int record_new_ref(struct send_ctx
*sctx
)
3447 ret
= iterate_inode_ref(sctx
->send_root
, sctx
->left_path
,
3448 sctx
->cmp_key
, 0, __record_new_ref
, sctx
);
3457 static int record_deleted_ref(struct send_ctx
*sctx
)
3461 ret
= iterate_inode_ref(sctx
->parent_root
, sctx
->right_path
,
3462 sctx
->cmp_key
, 0, __record_deleted_ref
, sctx
);
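
/*
 * The find_iref() helper below checks whether a ref with the given name and
 * parent directory exists in the other tree for the inode currently being
 * compared. record_changed_ref() uses it so that only refs present in one of
 * the two trees end up being recorded as new or deleted.
 */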
3471 struct find_ref_ctx
{
3474 struct btrfs_root
*root
;
3475 struct fs_path
*name
;
3479 static int __find_iref(int num
, u64 dir
, int index
,
3480 struct fs_path
*name
,
3483 struct find_ref_ctx
*ctx
= ctx_
;
3487 if (dir
== ctx
->dir
&& fs_path_len(name
) == fs_path_len(ctx
->name
) &&
3488 strncmp(name
->start
, ctx
->name
->start
, fs_path_len(name
)) == 0) {
3490 * To avoid doing extra lookups we'll only do this if everything
3493 ret
= get_inode_info(ctx
->root
, dir
, NULL
, &dir_gen
, NULL
,
3497 if (dir_gen
!= ctx
->dir_gen
)
3499 ctx
->found_idx
= num
;
3505 static int find_iref(struct btrfs_root
*root
,
3506 struct btrfs_path
*path
,
3507 struct btrfs_key
*key
,
3508 u64 dir
, u64 dir_gen
, struct fs_path
*name
)
3511 struct find_ref_ctx ctx
;
3515 ctx
.dir_gen
= dir_gen
;
3519 ret
= iterate_inode_ref(root
, path
, key
, 0, __find_iref
, &ctx
);
3523 if (ctx
.found_idx
== -1)
3526 return ctx
.found_idx
;
3529 static int __record_changed_new_ref(int num
, u64 dir
, int index
,
3530 struct fs_path
*name
,
3535 struct send_ctx
*sctx
= ctx
;
3537 ret
= get_inode_info(sctx
->send_root
, dir
, NULL
, &dir_gen
, NULL
,
3542 ret
= find_iref(sctx
->parent_root
, sctx
->right_path
,
3543 sctx
->cmp_key
, dir
, dir_gen
, name
);
3545 ret
= __record_new_ref(num
, dir
, index
, name
, sctx
);
3552 static int __record_changed_deleted_ref(int num
, u64 dir
, int index
,
3553 struct fs_path
*name
,
3558 struct send_ctx
*sctx
= ctx
;
3560 ret
= get_inode_info(sctx
->parent_root
, dir
, NULL
, &dir_gen
, NULL
,
3565 ret
= find_iref(sctx
->send_root
, sctx
->left_path
, sctx
->cmp_key
,
3566 dir
, dir_gen
, name
);
3568 ret
= __record_deleted_ref(num
, dir
, index
, name
, sctx
);
3575 static int record_changed_ref(struct send_ctx
*sctx
)
3579 ret
= iterate_inode_ref(sctx
->send_root
, sctx
->left_path
,
3580 sctx
->cmp_key
, 0, __record_changed_new_ref
, sctx
);
3583 ret
= iterate_inode_ref(sctx
->parent_root
, sctx
->right_path
,
3584 sctx
->cmp_key
, 0, __record_changed_deleted_ref
, sctx
);
3594 * Record and process all refs at once. Needed when an inode changes the
3595 * generation number, which means that it was deleted and recreated.
3597 static int process_all_refs(struct send_ctx
*sctx
,
3598 enum btrfs_compare_tree_result cmd
)
3601 struct btrfs_root
*root
;
3602 struct btrfs_path
*path
;
3603 struct btrfs_key key
;
3604 struct btrfs_key found_key
;
3605 struct extent_buffer
*eb
;
3607 iterate_inode_ref_t cb
;
3608 int pending_move
= 0;
3610 path
= alloc_path_for_send();
3614 if (cmd
== BTRFS_COMPARE_TREE_NEW
) {
3615 root
= sctx
->send_root
;
3616 cb
= __record_new_ref
;
3617 } else if (cmd
== BTRFS_COMPARE_TREE_DELETED
) {
3618 root
= sctx
->parent_root
;
3619 cb
= __record_deleted_ref
;
3621 btrfs_err(sctx
->send_root
->fs_info
,
3622 "Wrong command %d in process_all_refs", cmd
);
3627 key
.objectid
= sctx
->cmp_key
->objectid
;
3628 key
.type
= BTRFS_INODE_REF_KEY
;
3630 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3635 eb
= path
->nodes
[0];
3636 slot
= path
->slots
[0];
3637 if (slot
>= btrfs_header_nritems(eb
)) {
3638 ret
= btrfs_next_leaf(root
, path
);
3646 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
3648 if (found_key
.objectid
!= key
.objectid
||
3649 (found_key
.type
!= BTRFS_INODE_REF_KEY
&&
3650 found_key
.type
!= BTRFS_INODE_EXTREF_KEY
))
3653 ret
= iterate_inode_ref(root
, path
, &found_key
, 0, cb
, sctx
);
3659 btrfs_release_path(path
);
3661 ret
= process_recorded_refs(sctx
, &pending_move
);
3662 /* Only applicable to an incremental send. */
3663 ASSERT(pending_move
== 0);
3666 btrfs_free_path(path
);
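
/*
 * The send_* command helpers below all follow the same pattern: begin_cmd()
 * starts a new command in sctx->send_buf, the TLV_PUT_* macros append typed
 * attributes to it (jumping to the tlv_put_failure label on error) and
 * send_cmd() fills in the header and crc and writes the finished command to
 * the send file descriptor.
 */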
static int send_set_xattr(struct send_ctx *sctx,
			  struct fs_path *path,
			  const char *name, int name_len,
			  const char *data, int data_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);
	TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

static int send_remove_xattr(struct send_ctx *sctx,
			     struct fs_path *path,
			     const char *name, int name_len)
{
	int ret = 0;

	ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}
3712 static int __process_new_xattr(int num
, struct btrfs_key
*di_key
,
3713 const char *name
, int name_len
,
3714 const char *data
, int data_len
,
3718 struct send_ctx
*sctx
= ctx
;
3720 posix_acl_xattr_header dummy_acl
;
3722 p
= fs_path_alloc();
3727 * This hack is needed because empty acl's are stored as zero byte
3728 * data in xattrs. Problem with that is, that receiving these zero byte
3729 * acl's will fail later. To fix this, we send a dummy acl list that
3730 * only contains the version number and no entries.
3732 if (!strncmp(name
, XATTR_NAME_POSIX_ACL_ACCESS
, name_len
) ||
3733 !strncmp(name
, XATTR_NAME_POSIX_ACL_DEFAULT
, name_len
)) {
3734 if (data_len
== 0) {
3735 dummy_acl
.a_version
=
3736 cpu_to_le32(POSIX_ACL_XATTR_VERSION
);
3737 data
= (char *)&dummy_acl
;
3738 data_len
= sizeof(dummy_acl
);
3742 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
, p
);
3746 ret
= send_set_xattr(sctx
, p
, name
, name_len
, data
, data_len
);
3753 static int __process_deleted_xattr(int num
, struct btrfs_key
*di_key
,
3754 const char *name
, int name_len
,
3755 const char *data
, int data_len
,
3759 struct send_ctx
*sctx
= ctx
;
3762 p
= fs_path_alloc();
3766 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
, p
);
3770 ret
= send_remove_xattr(sctx
, p
, name
, name_len
);
3777 static int process_new_xattr(struct send_ctx
*sctx
)
3781 ret
= iterate_dir_item(sctx
->send_root
, sctx
->left_path
,
3782 sctx
->cmp_key
, __process_new_xattr
, sctx
);
3787 static int process_deleted_xattr(struct send_ctx
*sctx
)
3791 ret
= iterate_dir_item(sctx
->parent_root
, sctx
->right_path
,
3792 sctx
->cmp_key
, __process_deleted_xattr
, sctx
);
3797 struct find_xattr_ctx
{
3805 static int __find_xattr(int num
, struct btrfs_key
*di_key
,
3806 const char *name
, int name_len
,
3807 const char *data
, int data_len
,
3808 u8 type
, void *vctx
)
3810 struct find_xattr_ctx
*ctx
= vctx
;
3812 if (name_len
== ctx
->name_len
&&
3813 strncmp(name
, ctx
->name
, name_len
) == 0) {
3814 ctx
->found_idx
= num
;
3815 ctx
->found_data_len
= data_len
;
3816 ctx
->found_data
= kmemdup(data
, data_len
, GFP_NOFS
);
3817 if (!ctx
->found_data
)
3824 static int find_xattr(struct btrfs_root
*root
,
3825 struct btrfs_path
*path
,
3826 struct btrfs_key
*key
,
3827 const char *name
, int name_len
,
3828 char **data
, int *data_len
)
3831 struct find_xattr_ctx ctx
;
3834 ctx
.name_len
= name_len
;
3836 ctx
.found_data
= NULL
;
3837 ctx
.found_data_len
= 0;
3839 ret
= iterate_dir_item(root
, path
, key
, __find_xattr
, &ctx
);
3843 if (ctx
.found_idx
== -1)
3846 *data
= ctx
.found_data
;
3847 *data_len
= ctx
.found_data_len
;
3849 kfree(ctx
.found_data
);
3851 return ctx
.found_idx
;
3855 static int __process_changed_new_xattr(int num
, struct btrfs_key
*di_key
,
3856 const char *name
, int name_len
,
3857 const char *data
, int data_len
,
3861 struct send_ctx
*sctx
= ctx
;
3862 char *found_data
= NULL
;
3863 int found_data_len
= 0;
3865 ret
= find_xattr(sctx
->parent_root
, sctx
->right_path
,
3866 sctx
->cmp_key
, name
, name_len
, &found_data
,
3868 if (ret
== -ENOENT
) {
3869 ret
= __process_new_xattr(num
, di_key
, name
, name_len
, data
,
3870 data_len
, type
, ctx
);
3871 } else if (ret
>= 0) {
3872 if (data_len
!= found_data_len
||
3873 memcmp(data
, found_data
, data_len
)) {
3874 ret
= __process_new_xattr(num
, di_key
, name
, name_len
,
3875 data
, data_len
, type
, ctx
);
3885 static int __process_changed_deleted_xattr(int num
, struct btrfs_key
*di_key
,
3886 const char *name
, int name_len
,
3887 const char *data
, int data_len
,
3891 struct send_ctx
*sctx
= ctx
;
3893 ret
= find_xattr(sctx
->send_root
, sctx
->left_path
, sctx
->cmp_key
,
3894 name
, name_len
, NULL
, NULL
);
3896 ret
= __process_deleted_xattr(num
, di_key
, name
, name_len
, data
,
3897 data_len
, type
, ctx
);
3904 static int process_changed_xattr(struct send_ctx
*sctx
)
3908 ret
= iterate_dir_item(sctx
->send_root
, sctx
->left_path
,
3909 sctx
->cmp_key
, __process_changed_new_xattr
, sctx
);
3912 ret
= iterate_dir_item(sctx
->parent_root
, sctx
->right_path
,
3913 sctx
->cmp_key
, __process_changed_deleted_xattr
, sctx
);
3919 static int process_all_new_xattrs(struct send_ctx
*sctx
)
3922 struct btrfs_root
*root
;
3923 struct btrfs_path
*path
;
3924 struct btrfs_key key
;
3925 struct btrfs_key found_key
;
3926 struct extent_buffer
*eb
;
3929 path
= alloc_path_for_send();
3933 root
= sctx
->send_root
;
3935 key
.objectid
= sctx
->cmp_key
->objectid
;
3936 key
.type
= BTRFS_XATTR_ITEM_KEY
;
3938 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3943 eb
= path
->nodes
[0];
3944 slot
= path
->slots
[0];
3945 if (slot
>= btrfs_header_nritems(eb
)) {
3946 ret
= btrfs_next_leaf(root
, path
);
3949 } else if (ret
> 0) {
3956 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
3957 if (found_key
.objectid
!= key
.objectid
||
3958 found_key
.type
!= key
.type
) {
3963 ret
= iterate_dir_item(root
, path
, &found_key
,
3964 __process_new_xattr
, sctx
);
3972 btrfs_free_path(path
);
3976 static ssize_t
fill_read_buf(struct send_ctx
*sctx
, u64 offset
, u32 len
)
3978 struct btrfs_root
*root
= sctx
->send_root
;
3979 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
3980 struct inode
*inode
;
3983 struct btrfs_key key
;
3984 pgoff_t index
= offset
>> PAGE_CACHE_SHIFT
;
3986 unsigned pg_offset
= offset
& ~PAGE_CACHE_MASK
;
3989 key
.objectid
= sctx
->cur_ino
;
3990 key
.type
= BTRFS_INODE_ITEM_KEY
;
3993 inode
= btrfs_iget(fs_info
->sb
, &key
, root
, NULL
);
3995 return PTR_ERR(inode
);
3997 if (offset
+ len
> i_size_read(inode
)) {
3998 if (offset
> i_size_read(inode
))
4001 len
= offset
- i_size_read(inode
);
4006 last_index
= (offset
+ len
- 1) >> PAGE_CACHE_SHIFT
;
4007 while (index
<= last_index
) {
4008 unsigned cur_len
= min_t(unsigned, len
,
4009 PAGE_CACHE_SIZE
- pg_offset
);
4010 page
= find_or_create_page(inode
->i_mapping
, index
, GFP_NOFS
);
4016 if (!PageUptodate(page
)) {
4017 btrfs_readpage(NULL
, page
);
4019 if (!PageUptodate(page
)) {
4021 page_cache_release(page
);
4028 memcpy(sctx
->read_buf
+ ret
, addr
+ pg_offset
, cur_len
);
4031 page_cache_release(page
);
/*
 * Read some bytes from the current inode/file and send a write command to
 * user space.
 */
static int send_write(struct send_ctx *sctx, u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;
	ssize_t num_read = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len);

	num_read = fill_read_buf(sctx, offset, len);
	if (num_read <= 0) {
		if (num_read < 0)
			ret = num_read;
		goto out;
	}

	ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	if (ret < 0)
		return ret;
	return num_read;
}
4088 * Send a clone command to user space.
4090 static int send_clone(struct send_ctx
*sctx
,
4091 u64 offset
, u32 len
,
4092 struct clone_root
*clone_root
)
4098 verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, "
4099 "clone_inode=%llu, clone_offset=%llu\n", offset
, len
,
4100 clone_root
->root
->objectid
, clone_root
->ino
,
4101 clone_root
->offset
);
4103 p
= fs_path_alloc();
4107 ret
= begin_cmd(sctx
, BTRFS_SEND_C_CLONE
);
4111 ret
= get_cur_path(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
, p
);
4115 TLV_PUT_U64(sctx
, BTRFS_SEND_A_FILE_OFFSET
, offset
);
4116 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_LEN
, len
);
4117 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_PATH
, p
);
4119 if (clone_root
->root
== sctx
->send_root
) {
4120 ret
= get_inode_info(sctx
->send_root
, clone_root
->ino
, NULL
,
4121 &gen
, NULL
, NULL
, NULL
, NULL
);
4124 ret
= get_cur_path(sctx
, clone_root
->ino
, gen
, p
);
4126 ret
= get_inode_path(clone_root
->root
, clone_root
->ino
, p
);
4131 TLV_PUT_UUID(sctx
, BTRFS_SEND_A_CLONE_UUID
,
4132 clone_root
->root
->root_item
.uuid
);
4133 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_CTRANSID
,
4134 le64_to_cpu(clone_root
->root
->root_item
.ctransid
));
4135 TLV_PUT_PATH(sctx
, BTRFS_SEND_A_CLONE_PATH
, p
);
4136 TLV_PUT_U64(sctx
, BTRFS_SEND_A_CLONE_OFFSET
,
4137 clone_root
->offset
);
4139 ret
= send_cmd(sctx
);
/*
 * Send an update extent command to user space.
 */
static int send_update_extent(struct send_ctx *sctx,
			      u64 offset, u32 len)
{
	int ret = 0;
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;

	ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT);
	if (ret < 0)
		goto out;

	ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
	TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	fs_path_free(p);
	return ret;
}

static int send_hole(struct send_ctx *sctx, u64 end)
{
	struct fs_path *p = NULL;
	u64 offset = sctx->cur_inode_last_extent;
	u64 len;
	int ret = 0;

	p = fs_path_alloc();
	if (!p)
		return -ENOMEM;
	memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE);
	while (offset < end) {
		len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE);

		ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE);
		if (ret < 0)
			break;
		ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p);
		if (ret < 0)
			break;
		TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
		TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset);
		TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len);
		ret = send_cmd(sctx);
		if (ret < 0)
			break;
		offset += len;
	}
tlv_put_failure:
	fs_path_free(p);
	return ret;
}
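
/*
 * send_write_or_clone() below decides how to transfer one file extent: if a
 * usable clone source was found and the range is block aligned, a clone
 * command is sent; if BTRFS_SEND_FLAG_NO_FILE_DATA is set, only an
 * update_extent command is sent; otherwise the data is sent with plain write
 * commands in chunks of at most BTRFS_SEND_READ_SIZE bytes.
 */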
4213 static int send_write_or_clone(struct send_ctx
*sctx
,
4214 struct btrfs_path
*path
,
4215 struct btrfs_key
*key
,
4216 struct clone_root
*clone_root
)
4219 struct btrfs_file_extent_item
*ei
;
4220 u64 offset
= key
->offset
;
4225 u64 bs
= sctx
->send_root
->fs_info
->sb
->s_blocksize
;
4227 ei
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
4228 struct btrfs_file_extent_item
);
4229 type
= btrfs_file_extent_type(path
->nodes
[0], ei
);
4230 if (type
== BTRFS_FILE_EXTENT_INLINE
) {
4231 len
= btrfs_file_extent_inline_len(path
->nodes
[0],
4232 path
->slots
[0], ei
);
4234 * it is possible the inline item won't cover the whole page,
4235 * but there may be items after this page. Make
4236 * sure to send the whole thing
4238 len
= PAGE_CACHE_ALIGN(len
);
4240 len
= btrfs_file_extent_num_bytes(path
->nodes
[0], ei
);
4243 if (offset
+ len
> sctx
->cur_inode_size
)
4244 len
= sctx
->cur_inode_size
- offset
;
4250 if (clone_root
&& IS_ALIGNED(offset
+ len
, bs
)) {
4251 ret
= send_clone(sctx
, offset
, len
, clone_root
);
4252 } else if (sctx
->flags
& BTRFS_SEND_FLAG_NO_FILE_DATA
) {
4253 ret
= send_update_extent(sctx
, offset
, len
);
4257 if (l
> BTRFS_SEND_READ_SIZE
)
4258 l
= BTRFS_SEND_READ_SIZE
;
4259 ret
= send_write(sctx
, pos
+ offset
, l
);
4272 static int is_extent_unchanged(struct send_ctx
*sctx
,
4273 struct btrfs_path
*left_path
,
4274 struct btrfs_key
*ekey
)
4277 struct btrfs_key key
;
4278 struct btrfs_path
*path
= NULL
;
4279 struct extent_buffer
*eb
;
4281 struct btrfs_key found_key
;
4282 struct btrfs_file_extent_item
*ei
;
4287 u64 left_offset_fixed
;
4295 path
= alloc_path_for_send();
4299 eb
= left_path
->nodes
[0];
4300 slot
= left_path
->slots
[0];
4301 ei
= btrfs_item_ptr(eb
, slot
, struct btrfs_file_extent_item
);
4302 left_type
= btrfs_file_extent_type(eb
, ei
);
4304 if (left_type
!= BTRFS_FILE_EXTENT_REG
) {
4308 left_disknr
= btrfs_file_extent_disk_bytenr(eb
, ei
);
4309 left_len
= btrfs_file_extent_num_bytes(eb
, ei
);
4310 left_offset
= btrfs_file_extent_offset(eb
, ei
);
4311 left_gen
= btrfs_file_extent_generation(eb
, ei
);
4314 * Following comments will refer to these graphics. L is the left
4315 * extents which we are checking at the moment. 1-8 are the right
4316 * extents that we iterate.
4319 * |-1-|-2a-|-3-|-4-|-5-|-6-|
4322 * |--1--|-2b-|...(same as above)
4324 * Alternative situation. Happens on files where extents got split.
4326 * |-----------7-----------|-6-|
4328 * Alternative situation. Happens on files which got larger.
4331 * Nothing follows after 8.
4334 key
.objectid
= ekey
->objectid
;
4335 key
.type
= BTRFS_EXTENT_DATA_KEY
;
4336 key
.offset
= ekey
->offset
;
4337 ret
= btrfs_search_slot_for_read(sctx
->parent_root
, &key
, path
, 0, 0);
4346 * Handle special case where the right side has no extents at all.
4348 eb
= path
->nodes
[0];
4349 slot
= path
->slots
[0];
4350 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
4351 if (found_key
.objectid
!= key
.objectid
||
4352 found_key
.type
!= key
.type
) {
4353 /* If we're a hole then just pretend nothing changed */
4354 ret
= (left_disknr
) ? 0 : 1;
4359 * We're now on 2a, 2b or 7.
4362 while (key
.offset
< ekey
->offset
+ left_len
) {
4363 ei
= btrfs_item_ptr(eb
, slot
, struct btrfs_file_extent_item
);
4364 right_type
= btrfs_file_extent_type(eb
, ei
);
4365 if (right_type
!= BTRFS_FILE_EXTENT_REG
) {
4370 right_disknr
= btrfs_file_extent_disk_bytenr(eb
, ei
);
4371 right_len
= btrfs_file_extent_num_bytes(eb
, ei
);
4372 right_offset
= btrfs_file_extent_offset(eb
, ei
);
4373 right_gen
= btrfs_file_extent_generation(eb
, ei
);
4376 * Are we at extent 8? If yes, we know the extent is changed.
4377 * This may only happen on the first iteration.
4379 if (found_key
.offset
+ right_len
<= ekey
->offset
) {
4380 /* If we're a hole just pretend nothing changed */
4381 ret
= (left_disknr
) ? 0 : 1;
4385 left_offset_fixed
= left_offset
;
4386 if (key
.offset
< ekey
->offset
) {
4387 /* Fix the right offset for 2a and 7. */
4388 right_offset
+= ekey
->offset
- key
.offset
;
4390 /* Fix the left offset for all behind 2a and 2b */
4391 left_offset_fixed
+= key
.offset
- ekey
->offset
;
4395 * Check if we have the same extent.
4397 if (left_disknr
!= right_disknr
||
4398 left_offset_fixed
!= right_offset
||
4399 left_gen
!= right_gen
) {
4405 * Go to the next extent.
4407 ret
= btrfs_next_item(sctx
->parent_root
, path
);
4411 eb
= path
->nodes
[0];
4412 slot
= path
->slots
[0];
4413 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
4415 if (ret
|| found_key
.objectid
!= key
.objectid
||
4416 found_key
.type
!= key
.type
) {
4417 key
.offset
+= right_len
;
4420 if (found_key
.offset
!= key
.offset
+ right_len
) {
4428 * We're now behind the left extent (treat as unchanged) or at the end
4429 * of the right side (treat as changed).
4431 if (key
.offset
>= ekey
->offset
+ left_len
)
4438 btrfs_free_path(path
);
4442 static int get_last_extent(struct send_ctx
*sctx
, u64 offset
)
4444 struct btrfs_path
*path
;
4445 struct btrfs_root
*root
= sctx
->send_root
;
4446 struct btrfs_file_extent_item
*fi
;
4447 struct btrfs_key key
;
4452 path
= alloc_path_for_send();
4456 sctx
->cur_inode_last_extent
= 0;
4458 key
.objectid
= sctx
->cur_ino
;
4459 key
.type
= BTRFS_EXTENT_DATA_KEY
;
4460 key
.offset
= offset
;
4461 ret
= btrfs_search_slot_for_read(root
, &key
, path
, 0, 1);
4465 btrfs_item_key_to_cpu(path
->nodes
[0], &key
, path
->slots
[0]);
4466 if (key
.objectid
!= sctx
->cur_ino
|| key
.type
!= BTRFS_EXTENT_DATA_KEY
)
4469 fi
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
4470 struct btrfs_file_extent_item
);
4471 type
= btrfs_file_extent_type(path
->nodes
[0], fi
);
4472 if (type
== BTRFS_FILE_EXTENT_INLINE
) {
4473 u64 size
= btrfs_file_extent_inline_len(path
->nodes
[0],
4474 path
->slots
[0], fi
);
4475 extent_end
= ALIGN(key
.offset
+ size
,
4476 sctx
->send_root
->sectorsize
);
4478 extent_end
= key
.offset
+
4479 btrfs_file_extent_num_bytes(path
->nodes
[0], fi
);
4481 sctx
->cur_inode_last_extent
= extent_end
;
4483 btrfs_free_path(path
);
4487 static int maybe_send_hole(struct send_ctx
*sctx
, struct btrfs_path
*path
,
4488 struct btrfs_key
*key
)
4490 struct btrfs_file_extent_item
*fi
;
4495 if (sctx
->cur_ino
!= key
->objectid
|| !need_send_hole(sctx
))
4498 if (sctx
->cur_inode_last_extent
== (u64
)-1) {
4499 ret
= get_last_extent(sctx
, key
->offset
- 1);
4504 fi
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
4505 struct btrfs_file_extent_item
);
4506 type
= btrfs_file_extent_type(path
->nodes
[0], fi
);
4507 if (type
== BTRFS_FILE_EXTENT_INLINE
) {
4508 u64 size
= btrfs_file_extent_inline_len(path
->nodes
[0],
4509 path
->slots
[0], fi
);
4510 extent_end
= ALIGN(key
->offset
+ size
,
4511 sctx
->send_root
->sectorsize
);
4513 extent_end
= key
->offset
+
4514 btrfs_file_extent_num_bytes(path
->nodes
[0], fi
);
4517 if (path
->slots
[0] == 0 &&
4518 sctx
->cur_inode_last_extent
< key
->offset
) {
4520 * We might have skipped entire leafs that contained only
4521 * file extent items for our current inode. These leafs have
4522 * a generation number smaller (older) than the one in the
4523 * current leaf and the leaf our last extent came from, and
4524 * are located between these 2 leafs.
4526 ret
= get_last_extent(sctx
, key
->offset
- 1);
4531 if (sctx
->cur_inode_last_extent
< key
->offset
)
4532 ret
= send_hole(sctx
, key
->offset
);
4533 sctx
->cur_inode_last_extent
= extent_end
;
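
/*
 * process_extent() below is called for every file extent item of the current
 * inode: symlinks are skipped, extents that is_extent_unchanged() proves to
 * be identical in the parent snapshot are skipped, holes and prealloc extents
 * are left to the hole handling, and everything else is sent via
 * send_write_or_clone() after looking up a possible clone source.
 */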
4537 static int process_extent(struct send_ctx
*sctx
,
4538 struct btrfs_path
*path
,
4539 struct btrfs_key
*key
)
4541 struct clone_root
*found_clone
= NULL
;
4544 if (S_ISLNK(sctx
->cur_inode_mode
))
4547 if (sctx
->parent_root
&& !sctx
->cur_inode_new
) {
4548 ret
= is_extent_unchanged(sctx
, path
, key
);
4556 struct btrfs_file_extent_item
*ei
;
4559 ei
= btrfs_item_ptr(path
->nodes
[0], path
->slots
[0],
4560 struct btrfs_file_extent_item
);
4561 type
= btrfs_file_extent_type(path
->nodes
[0], ei
);
4562 if (type
== BTRFS_FILE_EXTENT_PREALLOC
||
4563 type
== BTRFS_FILE_EXTENT_REG
) {
4565 * The send spec does not have a prealloc command yet,
4566 * so just leave a hole for prealloc'ed extents until
4567 * we have enough commands queued up to justify rev'ing
4570 if (type
== BTRFS_FILE_EXTENT_PREALLOC
) {
4575 /* Have a hole, just skip it. */
4576 if (btrfs_file_extent_disk_bytenr(path
->nodes
[0], ei
) == 0) {
4583 ret
= find_extent_clone(sctx
, path
, key
->objectid
, key
->offset
,
4584 sctx
->cur_inode_size
, &found_clone
);
4585 if (ret
!= -ENOENT
&& ret
< 0)
4588 ret
= send_write_or_clone(sctx
, path
, key
, found_clone
);
4592 ret
= maybe_send_hole(sctx
, path
, key
);
4597 static int process_all_extents(struct send_ctx
*sctx
)
4600 struct btrfs_root
*root
;
4601 struct btrfs_path
*path
;
4602 struct btrfs_key key
;
4603 struct btrfs_key found_key
;
4604 struct extent_buffer
*eb
;
4607 root
= sctx
->send_root
;
4608 path
= alloc_path_for_send();
4612 key
.objectid
= sctx
->cmp_key
->objectid
;
4613 key
.type
= BTRFS_EXTENT_DATA_KEY
;
4615 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
4620 eb
= path
->nodes
[0];
4621 slot
= path
->slots
[0];
4623 if (slot
>= btrfs_header_nritems(eb
)) {
4624 ret
= btrfs_next_leaf(root
, path
);
4627 } else if (ret
> 0) {
4634 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
4636 if (found_key
.objectid
!= key
.objectid
||
4637 found_key
.type
!= key
.type
) {
4642 ret
= process_extent(sctx
, path
, &found_key
);
4650 btrfs_free_path(path
);
4654 static int process_recorded_refs_if_needed(struct send_ctx
*sctx
, int at_end
,
4656 int *refs_processed
)
4660 if (sctx
->cur_ino
== 0)
4662 if (!at_end
&& sctx
->cur_ino
== sctx
->cmp_key
->objectid
&&
4663 sctx
->cmp_key
->type
<= BTRFS_INODE_EXTREF_KEY
)
4665 if (list_empty(&sctx
->new_refs
) && list_empty(&sctx
->deleted_refs
))
4668 ret
= process_recorded_refs(sctx
, pending_move
);
4672 *refs_processed
= 1;
4677 static int finish_inode_if_needed(struct send_ctx
*sctx
, int at_end
)
4688 int pending_move
= 0;
4689 int refs_processed
= 0;
4691 ret
= process_recorded_refs_if_needed(sctx
, at_end
, &pending_move
,
4697 * We have processed the refs and thus need to advance send_progress.
4698 * Now, calls to get_cur_xxx will take the updated refs of the current
4699 * inode into account.
4701 * On the other hand, if our current inode is a directory and couldn't
4702 * be moved/renamed because its parent was renamed/moved too and it has
4703 * a higher inode number, we can only move/rename our current inode
4704 * after we moved/renamed its parent. Therefore in this case operate on
4705 * the old path (pre move/rename) of our current inode, and the
4706 * move/rename will be performed later.
4708 if (refs_processed
&& !pending_move
)
4709 sctx
->send_progress
= sctx
->cur_ino
+ 1;
4711 if (sctx
->cur_ino
== 0 || sctx
->cur_inode_deleted
)
4713 if (!at_end
&& sctx
->cmp_key
->objectid
== sctx
->cur_ino
)
4716 ret
= get_inode_info(sctx
->send_root
, sctx
->cur_ino
, NULL
, NULL
,
4717 &left_mode
, &left_uid
, &left_gid
, NULL
);
4721 if (!sctx
->parent_root
|| sctx
->cur_inode_new
) {
4723 if (!S_ISLNK(sctx
->cur_inode_mode
))
4726 ret
= get_inode_info(sctx
->parent_root
, sctx
->cur_ino
,
4727 NULL
, NULL
, &right_mode
, &right_uid
,
4732 if (left_uid
!= right_uid
|| left_gid
!= right_gid
)
4734 if (!S_ISLNK(sctx
->cur_inode_mode
) && left_mode
!= right_mode
)
4738 if (S_ISREG(sctx
->cur_inode_mode
)) {
4739 if (need_send_hole(sctx
)) {
4740 if (sctx
->cur_inode_last_extent
== (u64
)-1) {
4741 ret
= get_last_extent(sctx
, (u64
)-1);
4745 if (sctx
->cur_inode_last_extent
<
4746 sctx
->cur_inode_size
) {
4747 ret
= send_hole(sctx
, sctx
->cur_inode_size
);
4752 ret
= send_truncate(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
4753 sctx
->cur_inode_size
);
4759 ret
= send_chown(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
4760 left_uid
, left_gid
);
4765 ret
= send_chmod(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
,
4772 * If other directory inodes depended on our current directory
4773 * inode's move/rename, now do their move/rename operations.
4775 if (!is_waiting_for_move(sctx
, sctx
->cur_ino
)) {
4776 ret
= apply_children_dir_moves(sctx
);
4782 * Need to send that every time, no matter if it actually
4783 * changed between the two trees as we have done changes to
4786 sctx
->send_progress
= sctx
->cur_ino
+ 1;
4787 ret
= send_utimes(sctx
, sctx
->cur_ino
, sctx
->cur_inode_gen
);
4795 static int changed_inode(struct send_ctx
*sctx
,
4796 enum btrfs_compare_tree_result result
)
4799 struct btrfs_key
*key
= sctx
->cmp_key
;
4800 struct btrfs_inode_item
*left_ii
= NULL
;
4801 struct btrfs_inode_item
*right_ii
= NULL
;
4805 sctx
->cur_ino
= key
->objectid
;
4806 sctx
->cur_inode_new_gen
= 0;
4807 sctx
->cur_inode_last_extent
= (u64
)-1;
4810 * Set send_progress to current inode. This will tell all get_cur_xxx
4811 * functions that the current inode's refs are not updated yet. Later,
4812 * when process_recorded_refs is finished, it is set to cur_ino + 1.
4814 sctx
->send_progress
= sctx
->cur_ino
;
4816 if (result
== BTRFS_COMPARE_TREE_NEW
||
4817 result
== BTRFS_COMPARE_TREE_CHANGED
) {
4818 left_ii
= btrfs_item_ptr(sctx
->left_path
->nodes
[0],
4819 sctx
->left_path
->slots
[0],
4820 struct btrfs_inode_item
);
4821 left_gen
= btrfs_inode_generation(sctx
->left_path
->nodes
[0],
4824 right_ii
= btrfs_item_ptr(sctx
->right_path
->nodes
[0],
4825 sctx
->right_path
->slots
[0],
4826 struct btrfs_inode_item
);
4827 right_gen
= btrfs_inode_generation(sctx
->right_path
->nodes
[0],
4830 if (result
== BTRFS_COMPARE_TREE_CHANGED
) {
4831 right_ii
= btrfs_item_ptr(sctx
->right_path
->nodes
[0],
4832 sctx
->right_path
->slots
[0],
4833 struct btrfs_inode_item
);
4835 right_gen
= btrfs_inode_generation(sctx
->right_path
->nodes
[0],
4839 * The cur_ino = root dir case is special here. We can't treat
4840 * the inode as deleted+reused because it would generate a
4841 * stream that tries to delete/mkdir the root dir.
4843 if (left_gen
!= right_gen
&&
4844 sctx
->cur_ino
!= BTRFS_FIRST_FREE_OBJECTID
)
4845 sctx
->cur_inode_new_gen
= 1;
4848 if (result
== BTRFS_COMPARE_TREE_NEW
) {
4849 sctx
->cur_inode_gen
= left_gen
;
4850 sctx
->cur_inode_new
= 1;
4851 sctx
->cur_inode_deleted
= 0;
4852 sctx
->cur_inode_size
= btrfs_inode_size(
4853 sctx
->left_path
->nodes
[0], left_ii
);
4854 sctx
->cur_inode_mode
= btrfs_inode_mode(
4855 sctx
->left_path
->nodes
[0], left_ii
);
4856 if (sctx
->cur_ino
!= BTRFS_FIRST_FREE_OBJECTID
)
4857 ret
= send_create_inode_if_needed(sctx
);
4858 } else if (result
== BTRFS_COMPARE_TREE_DELETED
) {
4859 sctx
->cur_inode_gen
= right_gen
;
4860 sctx
->cur_inode_new
= 0;
4861 sctx
->cur_inode_deleted
= 1;
4862 sctx
->cur_inode_size
= btrfs_inode_size(
4863 sctx
->right_path
->nodes
[0], right_ii
);
4864 sctx
->cur_inode_mode
= btrfs_inode_mode(
4865 sctx
->right_path
->nodes
[0], right_ii
);
4866 } else if (result
== BTRFS_COMPARE_TREE_CHANGED
) {
4868 * We need to do some special handling in case the inode was
4869 * reported as changed with a changed generation number. This
4870 * means that the original inode was deleted and new inode
4871 * reused the same inum. So we have to treat the old inode as
4872 * deleted and the new one as new.
4874 if (sctx
->cur_inode_new_gen
) {
4876 * First, process the inode as if it was deleted.
4878 sctx
->cur_inode_gen
= right_gen
;
4879 sctx
->cur_inode_new
= 0;
4880 sctx
->cur_inode_deleted
= 1;
4881 sctx
->cur_inode_size
= btrfs_inode_size(
4882 sctx
->right_path
->nodes
[0], right_ii
);
4883 sctx
->cur_inode_mode
= btrfs_inode_mode(
4884 sctx
->right_path
->nodes
[0], right_ii
);
4885 ret
= process_all_refs(sctx
,
4886 BTRFS_COMPARE_TREE_DELETED
);
4891 * Now process the inode as if it was new.
4893 sctx
->cur_inode_gen
= left_gen
;
4894 sctx
->cur_inode_new
= 1;
4895 sctx
->cur_inode_deleted
= 0;
4896 sctx
->cur_inode_size
= btrfs_inode_size(
4897 sctx
->left_path
->nodes
[0], left_ii
);
4898 sctx
->cur_inode_mode
= btrfs_inode_mode(
4899 sctx
->left_path
->nodes
[0], left_ii
);
4900 ret
= send_create_inode_if_needed(sctx
);
4904 ret
= process_all_refs(sctx
, BTRFS_COMPARE_TREE_NEW
);
4908 * Advance send_progress now as we did not get into
4909 * process_recorded_refs_if_needed in the new_gen case.
4911 sctx
->send_progress
= sctx
->cur_ino
+ 1;
4914 * Now process all extents and xattrs of the inode as if
4915 * they were all new.
4917 ret
= process_all_extents(sctx
);
4920 ret
= process_all_new_xattrs(sctx
);
4924 sctx
->cur_inode_gen
= left_gen
;
4925 sctx
->cur_inode_new
= 0;
4926 sctx
->cur_inode_new_gen
= 0;
4927 sctx
->cur_inode_deleted
= 0;
4928 sctx
->cur_inode_size
= btrfs_inode_size(
4929 sctx
->left_path
->nodes
[0], left_ii
);
4930 sctx
->cur_inode_mode
= btrfs_inode_mode(
4931 sctx
->left_path
->nodes
[0], left_ii
);
/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode did already initiate processing of refs. The reason for this
 * is that in this case, compare_tree actually compares the refs of 2 different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
 * refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode did already initiate processing
 * of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					     sctx->cmp_key);
	}

	return ret;
}
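
/*
 * dir_changed() and compare_refs() below back the BTRFS_COMPARE_TREE_SAME
 * shortcut in changed_cb(): a ref item that is byte-for-byte identical in both
 * trees still needs processing if one of the parent directories it points to
 * was deleted and recreated (i.e. its generation changed), so the dirids
 * referenced by the item are checked before the item is skipped.
 */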
5014 static int dir_changed(struct send_ctx
*sctx
, u64 dir
)
5016 u64 orig_gen
, new_gen
;
5019 ret
= get_inode_info(sctx
->send_root
, dir
, NULL
, &new_gen
, NULL
, NULL
,
5024 ret
= get_inode_info(sctx
->parent_root
, dir
, NULL
, &orig_gen
, NULL
,
5029 return (orig_gen
!= new_gen
) ? 1 : 0;
5032 static int compare_refs(struct send_ctx
*sctx
, struct btrfs_path
*path
,
5033 struct btrfs_key
*key
)
5035 struct btrfs_inode_extref
*extref
;
5036 struct extent_buffer
*leaf
;
5037 u64 dirid
= 0, last_dirid
= 0;
5044 /* Easy case, just check this one dirid */
5045 if (key
->type
== BTRFS_INODE_REF_KEY
) {
5046 dirid
= key
->offset
;
5048 ret
= dir_changed(sctx
, dirid
);
5052 leaf
= path
->nodes
[0];
5053 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
5054 ptr
= btrfs_item_ptr_offset(leaf
, path
->slots
[0]);
5055 while (cur_offset
< item_size
) {
5056 extref
= (struct btrfs_inode_extref
*)(ptr
+
5058 dirid
= btrfs_inode_extref_parent(leaf
, extref
);
5059 ref_name_len
= btrfs_inode_extref_name_len(leaf
, extref
);
5060 cur_offset
+= ref_name_len
+ sizeof(*extref
);
5061 if (dirid
== last_dirid
)
5063 ret
= dir_changed(sctx
, dirid
);
5073 * Updates compare related fields in sctx and simply forwards to the actual
5074 * changed_xxx functions.
5076 static int changed_cb(struct btrfs_root
*left_root
,
5077 struct btrfs_root
*right_root
,
5078 struct btrfs_path
*left_path
,
5079 struct btrfs_path
*right_path
,
5080 struct btrfs_key
*key
,
5081 enum btrfs_compare_tree_result result
,
5085 struct send_ctx
*sctx
= ctx
;
5087 if (result
== BTRFS_COMPARE_TREE_SAME
) {
5088 if (key
->type
== BTRFS_INODE_REF_KEY
||
5089 key
->type
== BTRFS_INODE_EXTREF_KEY
) {
5090 ret
= compare_refs(sctx
, left_path
, key
);
5095 } else if (key
->type
== BTRFS_EXTENT_DATA_KEY
) {
5096 return maybe_send_hole(sctx
, left_path
, key
);
5100 result
= BTRFS_COMPARE_TREE_CHANGED
;
5104 sctx
->left_path
= left_path
;
5105 sctx
->right_path
= right_path
;
5106 sctx
->cmp_key
= key
;
5108 ret
= finish_inode_if_needed(sctx
, 0);
5112 /* Ignore non-FS objects */
5113 if (key
->objectid
== BTRFS_FREE_INO_OBJECTID
||
5114 key
->objectid
== BTRFS_FREE_SPACE_OBJECTID
)
5117 if (key
->type
== BTRFS_INODE_ITEM_KEY
)
5118 ret
= changed_inode(sctx
, result
);
5119 else if (key
->type
== BTRFS_INODE_REF_KEY
||
5120 key
->type
== BTRFS_INODE_EXTREF_KEY
)
5121 ret
= changed_ref(sctx
, result
);
5122 else if (key
->type
== BTRFS_XATTR_ITEM_KEY
)
5123 ret
= changed_xattr(sctx
, result
);
5124 else if (key
->type
== BTRFS_EXTENT_DATA_KEY
)
5125 ret
= changed_extent(sctx
, result
);
5131 static int full_send_tree(struct send_ctx
*sctx
)
5134 struct btrfs_trans_handle
*trans
= NULL
;
5135 struct btrfs_root
*send_root
= sctx
->send_root
;
5136 struct btrfs_key key
;
5137 struct btrfs_key found_key
;
5138 struct btrfs_path
*path
;
5139 struct extent_buffer
*eb
;
5144 path
= alloc_path_for_send();
5148 spin_lock(&send_root
->root_item_lock
);
5149 start_ctransid
= btrfs_root_ctransid(&send_root
->root_item
);
5150 spin_unlock(&send_root
->root_item_lock
);
5152 key
.objectid
= BTRFS_FIRST_FREE_OBJECTID
;
5153 key
.type
= BTRFS_INODE_ITEM_KEY
;
5158 * We need to make sure the transaction does not get committed
5159 * while we do anything on commit roots. Join a transaction to prevent
5162 trans
= btrfs_join_transaction(send_root
);
5163 if (IS_ERR(trans
)) {
5164 ret
= PTR_ERR(trans
);
5170 * Make sure the tree has not changed after re-joining. We detect this
5171 * by comparing start_ctransid and ctransid. They should always match.
5173 spin_lock(&send_root
->root_item_lock
);
5174 ctransid
= btrfs_root_ctransid(&send_root
->root_item
);
5175 spin_unlock(&send_root
->root_item_lock
);
5177 if (ctransid
!= start_ctransid
) {
5178 WARN(1, KERN_WARNING
"BTRFS: the root that you're trying to "
5179 "send was modified in between. This is "
5180 "probably a bug.\n");
5185 ret
= btrfs_search_slot_for_read(send_root
, &key
, path
, 1, 0);
5193 * When someone want to commit while we iterate, end the
5194 * joined transaction and rejoin.
5196 if (btrfs_should_end_transaction(trans
, send_root
)) {
5197 ret
= btrfs_end_transaction(trans
, send_root
);
5201 btrfs_release_path(path
);
5205 eb
= path
->nodes
[0];
5206 slot
= path
->slots
[0];
5207 btrfs_item_key_to_cpu(eb
, &found_key
, slot
);
5209 ret
= changed_cb(send_root
, NULL
, path
, NULL
,
5210 &found_key
, BTRFS_COMPARE_TREE_NEW
, sctx
);
5214 key
.objectid
= found_key
.objectid
;
5215 key
.type
= found_key
.type
;
5216 key
.offset
= found_key
.offset
+ 1;
5218 ret
= btrfs_next_item(send_root
, path
);
5228 ret
= finish_inode_if_needed(sctx
, 1);
5231 btrfs_free_path(path
);
5234 ret
= btrfs_end_transaction(trans
, send_root
);
5236 btrfs_end_transaction(trans
, send_root
);
static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
					  changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}

static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			"send_in_progress unbalanced %d root %llu\n",
			root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}
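
/*
 * Entry point of the send ioctl. In outline: mark the involved roots with
 * send_in_progress so they stay read-only, copy the user supplied arguments
 * including the clone sources and optional parent root, set up the send
 * context and its buffers, run send_subvol() and, unless
 * BTRFS_SEND_FLAG_OMIT_END_CMD is set, emit the final BTRFS_SEND_C_END
 * command before tearing everything down again.
 */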
5289 long btrfs_ioctl_send(struct file
*mnt_file
, void __user
*arg_
)
5292 struct btrfs_root
*send_root
;
5293 struct btrfs_root
*clone_root
;
5294 struct btrfs_fs_info
*fs_info
;
5295 struct btrfs_ioctl_send_args
*arg
= NULL
;
5296 struct btrfs_key key
;
5297 struct send_ctx
*sctx
= NULL
;
5299 u64
*clone_sources_tmp
= NULL
;
5300 int clone_sources_to_rollback
= 0;
5301 int sort_clone_roots
= 0;
5304 if (!capable(CAP_SYS_ADMIN
))
5307 send_root
= BTRFS_I(file_inode(mnt_file
))->root
;
5308 fs_info
= send_root
->fs_info
;
5311 * The subvolume must remain read-only during send, protect against
5314 spin_lock(&send_root
->root_item_lock
);
5315 send_root
->send_in_progress
++;
5316 spin_unlock(&send_root
->root_item_lock
);
5319 * This is done when we lookup the root, it should already be complete
5320 * by the time we get here.
5322 WARN_ON(send_root
->orphan_cleanup_state
!= ORPHAN_CLEANUP_DONE
);
5325 * Userspace tools do the checks and warn the user if it's
5328 if (!btrfs_root_readonly(send_root
)) {
5333 arg
= memdup_user(arg_
, sizeof(*arg
));
5340 if (!access_ok(VERIFY_READ
, arg
->clone_sources
,
5341 sizeof(*arg
->clone_sources
) *
5342 arg
->clone_sources_count
)) {
5347 if (arg
->flags
& ~BTRFS_SEND_FLAG_MASK
) {
5352 sctx
= kzalloc(sizeof(struct send_ctx
), GFP_NOFS
);
5358 INIT_LIST_HEAD(&sctx
->new_refs
);
5359 INIT_LIST_HEAD(&sctx
->deleted_refs
);
5360 INIT_RADIX_TREE(&sctx
->name_cache
, GFP_NOFS
);
5361 INIT_LIST_HEAD(&sctx
->name_cache_list
);
5363 sctx
->flags
= arg
->flags
;
5365 sctx
->send_filp
= fget(arg
->send_fd
);
5366 if (!sctx
->send_filp
) {
5371 sctx
->send_root
= send_root
;
5372 sctx
->clone_roots_cnt
= arg
->clone_sources_count
;
5374 sctx
->send_max_size
= BTRFS_SEND_BUF_SIZE
;
5375 sctx
->send_buf
= vmalloc(sctx
->send_max_size
);
5376 if (!sctx
->send_buf
) {
5381 sctx
->read_buf
= vmalloc(BTRFS_SEND_READ_SIZE
);
5382 if (!sctx
->read_buf
) {
5387 sctx
->pending_dir_moves
= RB_ROOT
;
5388 sctx
->waiting_dir_moves
= RB_ROOT
;
5390 sctx
->clone_roots
= vzalloc(sizeof(struct clone_root
) *
5391 (arg
->clone_sources_count
+ 1));
5392 if (!sctx
->clone_roots
) {
5397 if (arg
->clone_sources_count
) {
5398 clone_sources_tmp
= vmalloc(arg
->clone_sources_count
*
5399 sizeof(*arg
->clone_sources
));
5400 if (!clone_sources_tmp
) {
5405 ret
= copy_from_user(clone_sources_tmp
, arg
->clone_sources
,
5406 arg
->clone_sources_count
*
5407 sizeof(*arg
->clone_sources
));
5413 for (i
= 0; i
< arg
->clone_sources_count
; i
++) {
5414 key
.objectid
= clone_sources_tmp
[i
];
5415 key
.type
= BTRFS_ROOT_ITEM_KEY
;
5416 key
.offset
= (u64
)-1;
5418 index
= srcu_read_lock(&fs_info
->subvol_srcu
);
5420 clone_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
5421 if (IS_ERR(clone_root
)) {
5422 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5423 ret
= PTR_ERR(clone_root
);
5426 clone_sources_to_rollback
= i
+ 1;
5427 spin_lock(&clone_root
->root_item_lock
);
5428 clone_root
->send_in_progress
++;
5429 if (!btrfs_root_readonly(clone_root
)) {
5430 spin_unlock(&clone_root
->root_item_lock
);
5431 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5435 spin_unlock(&clone_root
->root_item_lock
);
5436 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5438 sctx
->clone_roots
[i
].root
= clone_root
;
5440 vfree(clone_sources_tmp
);
5441 clone_sources_tmp
= NULL
;
5444 if (arg
->parent_root
) {
5445 key
.objectid
= arg
->parent_root
;
5446 key
.type
= BTRFS_ROOT_ITEM_KEY
;
5447 key
.offset
= (u64
)-1;
5449 index
= srcu_read_lock(&fs_info
->subvol_srcu
);
5451 sctx
->parent_root
= btrfs_read_fs_root_no_name(fs_info
, &key
);
5452 if (IS_ERR(sctx
->parent_root
)) {
5453 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5454 ret
= PTR_ERR(sctx
->parent_root
);
5458 spin_lock(&sctx
->parent_root
->root_item_lock
);
5459 sctx
->parent_root
->send_in_progress
++;
5460 if (!btrfs_root_readonly(sctx
->parent_root
)) {
5461 spin_unlock(&sctx
->parent_root
->root_item_lock
);
5462 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5466 spin_unlock(&sctx
->parent_root
->root_item_lock
);
5468 srcu_read_unlock(&fs_info
->subvol_srcu
, index
);
5472 * Clones from send_root are allowed, but only if the clone source
5473 * is behind the current send position. This is checked while searching
5474 * for possible clone sources.
5476 sctx
->clone_roots
[sctx
->clone_roots_cnt
++].root
= sctx
->send_root
;
5478 /* We do a bsearch later */
5479 sort(sctx
->clone_roots
, sctx
->clone_roots_cnt
,
5480 sizeof(*sctx
->clone_roots
), __clone_root_cmp_sort
,
5482 sort_clone_roots
= 1;
5484 ret
= send_subvol(sctx
);
5488 if (!(sctx
->flags
& BTRFS_SEND_FLAG_OMIT_END_CMD
)) {
5489 ret
= begin_cmd(sctx
, BTRFS_SEND_C_END
);
5492 ret
= send_cmd(sctx
);
5498 WARN_ON(sctx
&& !ret
&& !RB_EMPTY_ROOT(&sctx
->pending_dir_moves
));
5499 while (sctx
&& !RB_EMPTY_ROOT(&sctx
->pending_dir_moves
)) {
5501 struct pending_dir_move
*pm
;
5503 n
= rb_first(&sctx
->pending_dir_moves
);
5504 pm
= rb_entry(n
, struct pending_dir_move
, node
);
5505 while (!list_empty(&pm
->list
)) {
5506 struct pending_dir_move
*pm2
;
5508 pm2
= list_first_entry(&pm
->list
,
5509 struct pending_dir_move
, list
);
5510 free_pending_move(sctx
, pm2
);
5512 free_pending_move(sctx
, pm
);
5515 WARN_ON(sctx
&& !ret
&& !RB_EMPTY_ROOT(&sctx
->waiting_dir_moves
));
5516 while (sctx
&& !RB_EMPTY_ROOT(&sctx
->waiting_dir_moves
)) {
5518 struct waiting_dir_move
*dm
;
5520 n
= rb_first(&sctx
->waiting_dir_moves
);
5521 dm
= rb_entry(n
, struct waiting_dir_move
, node
);
5522 rb_erase(&dm
->node
, &sctx
->waiting_dir_moves
);
5526 if (sort_clone_roots
) {
5527 for (i
= 0; i
< sctx
->clone_roots_cnt
; i
++)
5528 btrfs_root_dec_send_in_progress(
5529 sctx
->clone_roots
[i
].root
);
5531 for (i
= 0; sctx
&& i
< clone_sources_to_rollback
; i
++)
5532 btrfs_root_dec_send_in_progress(
5533 sctx
->clone_roots
[i
].root
);
5535 btrfs_root_dec_send_in_progress(send_root
);
5537 if (sctx
&& !IS_ERR_OR_NULL(sctx
->parent_root
))
5538 btrfs_root_dec_send_in_progress(sctx
->parent_root
);
5541 vfree(clone_sources_tmp
);
5544 if (sctx
->send_filp
)
5545 fput(sctx
->send_filp
);
5547 vfree(sctx
->clone_roots
);
5548 vfree(sctx
->send_buf
);
5549 vfree(sctx
->read_buf
);
5551 name_cache_free(sctx
);