/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include "transaction.h"
#include "delayed-ref.h"
struct extent_inode_elem {
	u64 inum;
	u64 offset;
	struct extent_inode_elem *next;
};
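
/*
 * check whether the file extent item (key, fi) covers extent_item_pos within
 * the extent; if it does, record the owning inode and file offset in a newly
 * allocated extent_inode_elem and chain it onto *eie.
 */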
static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
			      struct btrfs_file_extent_item *fi,
			      u64 extent_item_pos,
			      struct extent_inode_elem **eie)
{
	u64 data_offset;
	u64 data_len;
	struct extent_inode_elem *e;

	data_offset = btrfs_file_extent_offset(eb, fi);
	data_len = btrfs_file_extent_num_bytes(eb, fi);

	if (extent_item_pos < data_offset ||
	    extent_item_pos >= data_offset + data_len)
		return 1;

	e = kmalloc(sizeof(*e), GFP_NOFS);
	if (!e)
		return -ENOMEM;

	e->next = *eie;
	e->inum = key->objectid;
	e->offset = key->offset + (extent_item_pos - data_offset);
	*eie = e;

	return 0;
}
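
/*
 * scan all EXTENT_DATA items of a leaf for references to wanted_disk_byte and
 * collect the matching inode/offset pairs via check_extent_in_eb.
 */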
static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
			     u64 extent_item_pos,
			     struct extent_inode_elem **eie)
{
	struct btrfs_file_extent_item *fi;

	/*
	 * from the shared data ref, we only have the leaf but we need
	 * the key. thus, we must look into all items and see that we
	 * find one (some) with a reference to our extent item.
	 */
	nritems = btrfs_header_nritems(eb);
	for (slot = 0; slot < nritems; ++slot) {
		btrfs_item_key_to_cpu(eb, &key, slot);
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(eb, fi);
		if (extent_type == BTRFS_FILE_EXTENT_INLINE)
			continue;
		/* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte != wanted_disk_byte)
			continue;

		ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
 * this structure records all encountered refs on the way up to the root
 */
struct __prelim_ref {
	struct list_head list;
	u64 root_id;
	struct btrfs_key key_for_search;
	int level;
	int count;
	u64 parent;
	u64 wanted_disk_byte;
};

/*
 * the rules for all callers of this function are:
 * - obtaining the parent is the goal
 * - if you add a key, you must know that it is a correct key
 * - if you cannot add the parent or a correct key, then we will look into the
 *   block later to set a correct key
 *
 * delayed refs
 * ============
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    -   |     -
 *      key to resolve |    -   |     y    |    y   |     y
 *  tree block logical |    -   |     -    |    -   |     -
 *  root for resolving |    y   |     y    |    y   |     y
 *
 * - column 1:       we've the parent -> done
 * - column 2, 3, 4: we use the key to find the parent
 *
 * on disk refs (inline or keyed)
 * ==============================
 *        backref type | shared | indirect | shared | indirect
 * information         |   tree |     tree |   data |     data
 * --------------------+--------+----------+--------+----------
 *      parent logical |    y   |     -    |    y   |     -
 *      key to resolve |    -   |     -    |    -   |     y
 *  tree block logical |    y   |     y    |    y   |     y
 *  root for resolving |    -   |     y    |    y   |     y
 *
 * - column 1, 3: we've the parent -> done
 * - column 2:    we take the first key from the block to find the parent
 *                (see __add_missing_keys)
 * - column 4:    we use the key to find the parent
 *
 * additional information that's available but not required to find the parent
 * block might help in merging entries to gain some speed.
 */
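
/*
 * To illustrate (using the __add_prelim_ref() helper defined below): a shared
 * tree backref found on disk already knows its parent block and is queued as
 *
 *	__add_prelim_ref(prefs, 0, NULL, info_level + 1, offset, bytenr, 1);
 *
 * while an indirect data backref only knows (root, key) and is queued as
 *
 *	__add_prelim_ref(prefs, root, &key, 0, 0, bytenr, count);
 *
 * leaving the parent to be resolved later by __resolve_indirect_refs().
 */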
static int __add_prelim_ref(struct list_head *head, u64 root_id,
			    struct btrfs_key *key, int level,
			    u64 parent, u64 wanted_disk_byte, int count)
{
	struct __prelim_ref *ref;

	/* in case we're adding delayed refs, we're holding the refs spinlock */
	ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
	if (!ref)
		return -ENOMEM;

	ref->root_id = root_id;
	if (key)
		ref->key_for_search = *key;
	else
		memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));

	ref->level = level;
	ref->count = count;
	ref->parent = parent;
	ref->wanted_disk_byte = wanted_disk_byte;
	list_add_tail(&ref->list, head);

	return 0;
}

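/*
 * record the block at the given path level as a parent in the parents ulist;
 * for leaves, following leaves may hold further EXTENT_DATA items that
 * reference wanted_disk_byte and are checked as well (see the comment inside
 * the function).
 */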
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
			   struct ulist *parents, int level,
			   struct btrfs_key *key, u64 wanted_disk_byte,
			   const u64 *extent_item_pos)
{
	struct extent_buffer *eb = path->nodes[level];
	struct btrfs_file_extent_item *fi;
	u64 disk_byte;
	u64 wanted_objectid = key->objectid;

	ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
	if (ret < 0)
		return ret;

	/*
	 * if the current leaf is full with EXTENT_DATA items, we must
	 * check the next one if that holds a reference as well.
	 * ref->count cannot be used to skip this check.
	 * repeat this until we don't find any additional EXTENT_DATA items.
	 */
	ret = btrfs_next_leaf(root, path);

	for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
		btrfs_item_key_to_cpu(eb, key, slot);
		if (key->objectid != wanted_objectid ||
		    key->type != BTRFS_EXTENT_DATA_KEY)
			break;
		fi = btrfs_item_ptr(eb, slot,
				    struct btrfs_file_extent_item);
		disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
		if (disk_byte == wanted_disk_byte)
			ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
	}

	return ret;
}

/*
 * resolve an indirect backref in the form (root_id, key, level)
 * to a logical address
 */
static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
				  int search_commit_root,
				  u64 time_seq,
				  struct __prelim_ref *ref,
				  struct ulist *parents,
				  const u64 *extent_item_pos)
{
	struct btrfs_path *path;
	struct btrfs_root *root;
	struct btrfs_key root_key;
	struct btrfs_key key = {0};
	struct extent_buffer *eb;
	int level = ref->level;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	root_key.objectid = ref->root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	root = btrfs_read_fs_root_no_name(fs_info, &root_key);

	root_level = btrfs_header_level(root->node);

	if (root_level + 1 == level)
		goto out;

	path->lowest_level = level;
	ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
	pr_debug("search slot in root %llu (level %d, ref count %d) returned "
		 "%d for key (%llu %u %llu)\n",
		 (unsigned long long)ref->root_id, level, ref->count, ret,
		 (unsigned long long)ref->key_for_search.objectid,
		 ref->key_for_search.type,
		 (unsigned long long)ref->key_for_search.offset);

	eb = path->nodes[level];
	if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, &key, path->slots[0]);

	ret = add_all_parents(root, path, parents, level, &key,
			      ref->wanted_disk_byte, extent_item_pos);
out:
	btrfs_free_path(path);
	return ret;
}

/*
 * resolve all indirect backrefs from the list
 */
static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
				   int search_commit_root, u64 time_seq,
				   struct list_head *head,
				   const u64 *extent_item_pos)
{
	struct __prelim_ref *ref;
	struct __prelim_ref *ref_safe;
	struct __prelim_ref *new_ref;
	struct ulist *parents;
	struct ulist_node *node;
	struct ulist_iterator uiter;

	parents = ulist_alloc(GFP_NOFS);
	if (!parents)
		return -ENOMEM;

	/*
	 * _safe allows us to insert directly after the current item without
	 * iterating over the newly inserted items.
	 * we're also allowed to re-assign ref during iteration.
	 */
	list_for_each_entry_safe(ref, ref_safe, head, list) {
		if (ref->parent)	/* already direct */
			continue;
		err = __resolve_indirect_ref(fs_info, search_commit_root,
					     time_seq, ref, parents,
					     extent_item_pos);

		/* we put the first parent into the ref at hand */
		ULIST_ITER_INIT(&uiter);
		node = ulist_next(parents, &uiter);
		ref->parent = node ? node->val : 0;

		/* additional parents require new refs being added here */
		while ((node = ulist_next(parents, &uiter))) {
			new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
			memcpy(new_ref, ref, sizeof(*ref));
			new_ref->parent = node->val;
			list_add(&new_ref->list, &ref->list);
		}
		ulist_reinit(parents);
	}

	ulist_free(parents);
	return ret;
}

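/*
 * two __prelim_refs describe the same block iff level, root, search key and
 * parent all match.
 */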
static inline int ref_for_same_block(struct __prelim_ref *ref1,
				     struct __prelim_ref *ref2)
{
	if (ref1->level != ref2->level)
		return 0;
	if (ref1->root_id != ref2->root_id)
		return 0;
	if (ref1->key_for_search.type != ref2->key_for_search.type)
		return 0;
	if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
		return 0;
	if (ref1->key_for_search.offset != ref2->key_for_search.offset)
		return 0;
	if (ref1->parent != ref2->parent)
		return 0;

	return 1;
}

/*
 * read tree blocks and add keys where required.
 */
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
			      struct list_head *head)
{
	struct list_head *pos;
	struct extent_buffer *eb;

	list_for_each(pos, head) {
		struct __prelim_ref *ref;
		ref = list_entry(pos, struct __prelim_ref, list);

		if (ref->key_for_search.type)
			continue;
		BUG_ON(!ref->wanted_disk_byte);
		eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
				     fs_info->tree_root->leafsize, 0);
		btrfs_tree_read_lock(eb);
		if (btrfs_header_level(eb) == 0)
			btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
		else
			btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return 0;
}

/*
 * merge two lists of backrefs and adjust counts accordingly
 *
 * mode = 1: merge identical keys, if key is set
 *    FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
 *           additionally, we could even add a key range for the blocks we
 *           looked into to merge even more (-> replace unresolved refs by those
 *           having a parent)
 * mode = 2: merge identical parents
 */
static int __merge_refs(struct list_head *head, int mode)
{
	struct list_head *pos1;

	list_for_each(pos1, head) {
		struct list_head *n2;
		struct list_head *pos2;
		struct __prelim_ref *ref1;

		ref1 = list_entry(pos1, struct __prelim_ref, list);

		for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
		     pos2 = n2, n2 = pos2->next) {
			struct __prelim_ref *ref2;
			struct __prelim_ref *xchg;

			ref2 = list_entry(pos2, struct __prelim_ref, list);

			if (mode == 1) {
				if (!ref_for_same_block(ref1, ref2))
					continue;
				if (!ref1->parent && ref2->parent) {
					xchg = ref1;
					ref1 = ref2;
					ref2 = xchg;
				}
				ref1->count += ref2->count;
			} else {
				if (ref1->parent != ref2->parent)
					continue;
				ref1->count += ref2->count;
			}
			list_del(&ref2->list);
			kfree(ref2);
		}
	}
	return 0;
}

/*
 * add all currently queued delayed refs from this head whose seq nr is
 * smaller or equal that seq to the list
 */
static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
			      struct list_head *prefs)
{
	struct btrfs_delayed_extent_op *extent_op = head->extent_op;
	struct rb_node *n = &head->node.rb_node;
	struct btrfs_key key;
	struct btrfs_key op_key = {0};

	if (extent_op && extent_op->update_key)
		btrfs_disk_key_to_cpu(&op_key, &extent_op->key);

	while ((n = rb_prev(n))) {
		struct btrfs_delayed_ref_node *node;
		node = rb_entry(n, struct btrfs_delayed_ref_node,
				rb_node);
		if (node->bytenr != head->node.bytenr)
			break;
		WARN_ON(node->is_head);

		switch (node->action) {
		case BTRFS_ADD_DELAYED_EXTENT:
		case BTRFS_UPDATE_DELAYED_HEAD:
			continue;
		case BTRFS_ADD_DELAYED_REF:
			sgn = 1;
			break;
		case BTRFS_DROP_DELAYED_REF:
			sgn = -1;
			break;
		}
		switch (node->type) {
		case BTRFS_TREE_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, &op_key,
					       ref->level + 1, 0, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_BLOCK_REF_KEY: {
			struct btrfs_delayed_tree_ref *ref;

			ref = btrfs_delayed_node_to_tree_ref(node);
			ret = __add_prelim_ref(prefs, ref->root, NULL,
					       ref->level + 1, ref->parent,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;
			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
					       node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_delayed_data_ref *ref;

			ref = btrfs_delayed_node_to_data_ref(node);

			key.objectid = ref->objectid;
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = ref->offset;
			ret = __add_prelim_ref(prefs, ref->root, &key, 0,
					       ref->parent, node->bytenr,
					       node->ref_mod * sgn);
			break;
		}
		}
	}

	return 0;
}

/*
 * add all inline backrefs for bytenr to the list
 */
static int __add_inline_refs(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path, u64 bytenr,
			     int *info_level, struct list_head *prefs)
{
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_extent_item *ei;

	/*
	 * enumerate all inline refs
	 */
	leaf = path->nodes[0];
	slot = path->slots[0];

	item_size = btrfs_item_size_nr(leaf, slot);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		struct btrfs_tree_block_info *info;

		info = (struct btrfs_tree_block_info *)ptr;
		*info_level = btrfs_tree_block_level(leaf, info);
		ptr += sizeof(struct btrfs_tree_block_info);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	while (ptr < end) {
		struct btrfs_extent_inline_ref *iref;

		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		offset = btrfs_extent_inline_ref_offset(leaf, iref);

		switch (type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
					       *info_level + 1, offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;

			sdref = (struct btrfs_shared_data_ref *)(iref + 1);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, offset, NULL,
					       *info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;

			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}

	return 0;
}

/*
 * add all non-inline backrefs for bytenr to the list
 */
static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
			    struct btrfs_path *path, u64 bytenr,
			    int info_level, struct list_head *prefs)
{
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct extent_buffer *leaf;
	struct btrfs_key key;

	while (1) {
		ret = btrfs_next_item(extent_root, path);

		slot = path->slots[0];
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid != bytenr)
			break;
		if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
			continue;
		if (key.type > BTRFS_SHARED_DATA_REF_KEY)
			break;

		switch (key.type) {
		case BTRFS_SHARED_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, 0, NULL,
					       info_level + 1, key.offset,
					       bytenr, 1);
			break;
		case BTRFS_SHARED_DATA_REF_KEY: {
			struct btrfs_shared_data_ref *sdref;

			sdref = btrfs_item_ptr(leaf, slot,
					       struct btrfs_shared_data_ref);
			count = btrfs_shared_data_ref_count(leaf, sdref);
			ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
					       bytenr, count);
			break;
		}
		case BTRFS_TREE_BLOCK_REF_KEY:
			ret = __add_prelim_ref(prefs, key.offset, NULL,
					       info_level + 1, 0,
					       bytenr, 1);
			break;
		case BTRFS_EXTENT_DATA_REF_KEY: {
			struct btrfs_extent_data_ref *dref;

			dref = btrfs_item_ptr(leaf, slot,
					      struct btrfs_extent_data_ref);
			count = btrfs_extent_data_ref_count(leaf, dref);
			key.objectid = btrfs_extent_data_ref_objectid(leaf,
								      dref);
			key.type = BTRFS_EXTENT_DATA_KEY;
			key.offset = btrfs_extent_data_ref_offset(leaf, dref);
			root = btrfs_extent_data_ref_root(leaf, dref);
			ret = __add_prelim_ref(prefs, root, &key, 0, 0,
					       bytenr, count);
			break;
		}
		}
	}

	return ret;
}

/*
 * this adds all existing backrefs (inline backrefs, backrefs and delayed
 * refs) for the given bytenr to the refs list, merges duplicates and resolves
 * indirect refs to their parent bytenr.
 * When roots are found, they're added to the roots list
 *
 * FIXME some caching might speed things up
 */
static int find_parent_nodes(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info, u64 bytenr,
			     u64 delayed_ref_seq, u64 time_seq,
			     struct ulist *refs, struct ulist *roots,
			     const u64 *extent_item_pos)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;
	struct btrfs_delayed_ref_head *head;
	int info_level = 0;
	int ret;
	int search_commit_root = (trans == BTRFS_BACKREF_SEARCH_COMMIT_ROOT);
	struct list_head prefs_delayed;
	struct list_head prefs;
	struct __prelim_ref *ref;

	INIT_LIST_HEAD(&prefs);
	INIT_LIST_HEAD(&prefs_delayed);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->search_commit_root = !!search_commit_root;

	/*
	 * grab both a lock on the path and a lock on the delayed ref head.
	 * We need both to get a consistent picture of how the refs look
	 * at a specified point in time
	 */
again:
	ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	if (trans != BTRFS_BACKREF_SEARCH_COMMIT_ROOT) {
		/*
		 * look if there are updates for this ref queued and lock the
		 * head
		 */
		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		head = btrfs_find_delayed_ref_head(trans, bytenr);
		if (head) {
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&head->node.refs);
				spin_unlock(&delayed_refs->lock);

				btrfs_release_path(path);

				/*
				 * Mutex was contended, block until it's
				 * released and try again
				 */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(&head->node);
				goto again;
			}
			ret = __add_delayed_refs(head, delayed_ref_seq,
						 &prefs_delayed);
			if (ret) {
				spin_unlock(&delayed_refs->lock);
				goto out;
			}
		}
		spin_unlock(&delayed_refs->lock);
	}

	if (path->slots[0]) {
		struct extent_buffer *leaf;
		int slot;

		leaf = path->nodes[0];
		slot = path->slots[0];
		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.objectid == bytenr &&
		    key.type == BTRFS_EXTENT_ITEM_KEY) {
			ret = __add_inline_refs(fs_info, path, bytenr,
						&info_level, &prefs);
			if (ret)
				goto out;
			ret = __add_keyed_refs(fs_info, path, bytenr,
					       info_level, &prefs);
			if (ret)
				goto out;
		}
	}
	btrfs_release_path(path);

	list_splice_init(&prefs_delayed, &prefs);

	ret = __add_missing_keys(fs_info, &prefs);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 1);
	if (ret)
		goto out;

	ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
				      &prefs, extent_item_pos);
	if (ret)
		goto out;

	ret = __merge_refs(&prefs, 2);
	if (ret)
		goto out;

	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);

		if (ref->count && ref->root_id && ref->parent == 0) {
			/* no parent == root of tree */
			ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
		}
		if (ref->count && ref->parent) {
			struct extent_inode_elem *eie = NULL;
			if (extent_item_pos) {
				u32 bsz;
				struct extent_buffer *eb;

				bsz = btrfs_level_size(fs_info->extent_root,
						       info_level);
				eb = read_tree_block(fs_info->extent_root,
						     ref->parent, bsz, 0);
				ret = find_extent_in_eb(eb, bytenr,
							*extent_item_pos, &eie);
				free_extent_buffer(eb);
			}
			ret = ulist_add(refs, ref->parent,
					(unsigned long)eie, GFP_NOFS);
		}
		kfree(ref);
	}

out:
	mutex_unlock(&head->mutex);
	btrfs_free_path(path);
	while (!list_empty(&prefs)) {
		ref = list_first_entry(&prefs, struct __prelim_ref, list);
		list_del(&ref->list);
		kfree(ref);
	}
	while (!list_empty(&prefs_delayed)) {
		ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
				       list);
		list_del(&ref->list);
		kfree(ref);
	}

	return ret;
}

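/*
 * free a ulist of leaf blocks as filled in by find_parent_nodes, including
 * the extent_inode_elem chains hanging off each node's aux field.
 */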
static void free_leaf_list(struct ulist *blocks)
{
	struct ulist_node *node = NULL;
	struct extent_inode_elem *eie;
	struct extent_inode_elem *eie_next;
	struct ulist_iterator uiter;

	ULIST_ITER_INIT(&uiter);
	while ((node = ulist_next(blocks, &uiter))) {
		eie = (struct extent_inode_elem *)node->aux;
		for (; eie; eie = eie_next) {
			eie_next = eie->next;
			kfree(eie);
		}
	}

	ulist_free(blocks);
}

/*
 * Finds all leafs with a reference to the specified combination of bytenr and
 * offset. key_list_head will point to a list of corresponding keys (caller must
 * free each list element). The leafs will be stored in the leafs ulist, which
 * must be freed with ulist_free.
 *
 * returns 0 on success, <0 on error
 */
static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
				struct btrfs_fs_info *fs_info, u64 bytenr,
				u64 delayed_ref_seq, u64 time_seq,
				struct ulist **leafs,
				const u64 *extent_item_pos)
{
	struct ulist *tmp;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*leafs = ulist_alloc(GFP_NOFS);
	if (!*leafs) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
				time_seq, *leafs, tmp, extent_item_pos);
	ulist_free(tmp);

	if (ret < 0 && ret != -ENOENT) {
		free_leaf_list(*leafs);
		return ret;
	}

	return 0;
}

/*
 * walk all backrefs for a given extent to find all roots that reference this
 * extent. Walking a backref means finding all extents that reference this
 * extent and in turn walk the backrefs of those, too. Naturally this is a
 * recursive process, but here it is implemented in an iterative fashion: We
 * find all referencing extents for the extent in question and put them on a
 * list. In turn, we find all referencing extents for those, further appending
 * to the list. The way we iterate the list allows adding more elements after
 * the current while iterating. The process stops when we reach the end of the
 * list. Found roots are added to the roots list.
 *
 * returns 0 on success, < 0 on error.
 */
int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
			 struct btrfs_fs_info *fs_info, u64 bytenr,
			 u64 delayed_ref_seq, u64 time_seq,
			 struct ulist **roots)
{
	struct ulist *tmp;
	struct ulist_node *node = NULL;
	struct ulist_iterator uiter;
	int ret;

	tmp = ulist_alloc(GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	*roots = ulist_alloc(GFP_NOFS);
	if (!*roots) {
		ulist_free(tmp);
		return -ENOMEM;
	}

	ULIST_ITER_INIT(&uiter);
	while (1) {
		ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
					time_seq, tmp, *roots, NULL);
		if (ret < 0 && ret != -ENOENT) {
			ulist_free(tmp);
			ulist_free(*roots);
			return ret;
		}
		node = ulist_next(tmp, &uiter);
		if (!node)
			break;
		bytenr = node->val;
	}

	ulist_free(tmp);
	return 0;
}

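/*
 * A minimal caller sketch for the function above (illustrative only; trans,
 * fs_info, bytenr and the two sequence numbers are whatever context the
 * caller already holds):
 *
 *	struct ulist *roots = NULL;
 *	struct ulist_iterator uiter;
 *	struct ulist_node *node;
 *
 *	ret = btrfs_find_all_roots(trans, fs_info, bytenr, seq, time_seq,
 *				   &roots);
 *	if (!ret) {
 *		ULIST_ITER_INIT(&uiter);
 *		while ((node = ulist_next(roots, &uiter)))
 *			pr_debug("root %llu\n", node->val);
 *		ulist_free(roots);
 *	}
 */
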
static int __inode_info(u64 inum, u64 ioff, u8 key_type,
			struct btrfs_root *fs_root, struct btrfs_path *path,
			struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = inum;
	key.offset = ioff;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type || found_key->objectid != key.objectid)
		return 1;

	return 0;
}

/*
 * this makes the path point to (inum INODE_ITEM ioff)
 */
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
		    struct btrfs_path *path)
{
	struct btrfs_key key;
	return __inode_info(inum, ioff, BTRFS_INODE_ITEM_KEY, fs_root, path,
			    &key);
}

static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
			  struct btrfs_path *path,
			  struct btrfs_key *found_key)
{
	return __inode_info(inum, ioff, BTRFS_INODE_REF_KEY, fs_root, path,
			    found_key);
}

/*
 * this iterates to turn a btrfs_inode_ref into a full filesystem path. elements
 * of the path are separated by '/' and the path is guaranteed to be
 * 0-terminated. the path is only given within the current file system.
 * Therefore, it never starts with a '/'. the caller is responsible to provide
 * "size" bytes in "dest". the dest buffer will be filled backwards. finally,
 * the start point of the resulting string is returned. this pointer is within
 * dest, normally.
 * in case the path buffer would overflow, the pointer is decremented further
 * as if output was written to the buffer, though no more output is actually
 * generated. that way, the caller can determine how much space would be
 * required for the path to fit into the buffer. in that case, the returned
 * value will be smaller than dest. callers must check this!
 */
static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
			  struct btrfs_inode_ref *iref,
			  struct extent_buffer *eb_in, u64 parent,
			  char *dest, u32 size)
{
	u32 len;
	int slot;
	u64 next_inum;
	int ret;
	s64 bytes_left = size - 1;
	struct extent_buffer *eb = eb_in;
	struct btrfs_key found_key;
	int leave_spinning = path->leave_spinning;

	if (bytes_left >= 0)
		dest[bytes_left] = '\0';

	path->leave_spinning = 1;
	while (1) {
		len = btrfs_inode_ref_name_len(eb, iref);
		bytes_left -= len;
		if (bytes_left >= 0)
			read_extent_buffer(eb, dest + bytes_left,
					   (unsigned long)(iref + 1), len);
		if (eb != eb_in) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
		}
		ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
		if (ret)
			break;
		next_inum = found_key.offset;

		/* regular exit ahead */
		if (parent == next_inum)
			break;

		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		if (eb != eb_in) {
			atomic_inc(&eb->refs);
			btrfs_tree_read_lock(eb);
			btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		}
		btrfs_release_path(path);

		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
		parent = next_inum;
		--bytes_left;
		if (bytes_left >= 0)
			dest[bytes_left] = '/';
	}

	btrfs_release_path(path);
	path->leave_spinning = leave_spinning;

	if (ret)
		return ERR_PTR(ret);

	return dest + bytes_left;
}

/*
 * this makes the path point to (logical EXTENT_ITEM *)
 * returns BTRFS_EXTENT_FLAG_DATA for data, BTRFS_EXTENT_FLAG_TREE_BLOCK for
 * tree blocks and <0 on error.
 */
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
			struct btrfs_path *path, struct btrfs_key *found_key)
{
	int ret;
	u64 flags;
	u32 item_size;
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;

	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.objectid = logical;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->extent_root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	ret = btrfs_previous_item(fs_info->extent_root, path,
				  0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		return ret;

	btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
	if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
	    found_key->objectid > logical ||
	    found_key->objectid + found_key->offset <= logical) {
		pr_debug("logical %llu is not within any extent\n",
			 (unsigned long long)logical);
		return -ENOENT;
	}

	eb = path->nodes[0];
	item_size = btrfs_item_size_nr(eb, path->slots[0]);
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(eb, ei);

	pr_debug("logical %llu is at position %llu within the extent (%llu "
		 "EXTENT_ITEM %llu) flags %#llx size %u\n",
		 (unsigned long long)logical,
		 (unsigned long long)(logical - found_key->objectid),
		 (unsigned long long)found_key->objectid,
		 (unsigned long long)found_key->offset,
		 (unsigned long long)flags, item_size);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		return BTRFS_EXTENT_FLAG_TREE_BLOCK;
	if (flags & BTRFS_EXTENT_FLAG_DATA)
		return BTRFS_EXTENT_FLAG_DATA;

	return -EIO;
}

/*
 * helper function to iterate extent inline refs. ptr must point to a 0 value
 * for the first call and may be modified. it is used to track state.
 * if more refs exist, 0 is returned and the next call to
 * __get_extent_inline_ref must pass the modified ptr parameter to get the
 * next ref. after the last ref was processed, 1 is returned.
 * returns <0 on error
 */
static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
				   struct btrfs_extent_item *ei, u32 item_size,
				   struct btrfs_extent_inline_ref **out_eiref,
				   int *out_type)
{
	unsigned long end;
	u64 flags;
	struct btrfs_tree_block_info *info;

	if (!*ptr) {
		/* first call */
		flags = btrfs_extent_flags(eb, ei);
		if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
			info = (struct btrfs_tree_block_info *)(ei + 1);
			*out_eiref =
				(struct btrfs_extent_inline_ref *)(info + 1);
		} else {
			*out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
		}
		*ptr = (unsigned long)*out_eiref;
		if ((void *)*ptr >= (void *)ei + item_size)
			return -ENOENT;
	}

	end = (unsigned long)ei + item_size;
	*out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
	*out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);

	*ptr += btrfs_extent_inline_ref_size(*out_type);
	WARN_ON(*ptr > end);
	if (*ptr == end)
		return 1; /* last */

	return 0;
}

/*
 * reads the tree block backref for an extent. tree level and root are returned
 * through out_level and out_root. ptr must point to a 0 value for the first
 * call and may be modified (see __get_extent_inline_ref comment).
 * returns 0 if data was provided, 1 if there was no more data to provide or
 * <0 on error.
 */
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
			    struct btrfs_extent_item *ei, u32 item_size,
			    u64 *out_root, u8 *out_level)
{
	int ret;
	int type;
	struct btrfs_tree_block_info *info;
	struct btrfs_extent_inline_ref *eiref;

	if (*ptr == (unsigned long)-1)
		return 1;

	while (1) {
		ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
					      &eiref, &type);
		if (ret < 0)
			return ret;

		if (type == BTRFS_TREE_BLOCK_REF_KEY ||
		    type == BTRFS_SHARED_BLOCK_REF_KEY)
			break;

		if (ret == 1)
			return 1;
	}

	/* we can treat both ref types equally here */
	info = (struct btrfs_tree_block_info *)(ei + 1);
	*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
	*out_level = btrfs_tree_block_level(eb, info);

	if (ret == 1)
		*ptr = (unsigned long)-1;

	return 0;
}

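/*
 * call iterate() once per inode/offset pair recorded for a resolved leaf,
 * stopping early when the callback returns non-zero.
 */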
static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
			     u64 root, u64 extent_item_objectid,
			     iterate_extent_inodes_t *iterate, void *ctx)
{
	struct extent_inode_elem *eie;
	int ret = 0;

	for (eie = inode_list; eie; eie = eie->next) {
		pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
			 "root %llu\n", extent_item_objectid,
			 eie->inum, eie->offset, root);
		ret = iterate(eie->inum, eie->offset, root, ctx);
		if (ret) {
			pr_debug("stopping iteration for %llu due to ret=%d\n",
				 extent_item_objectid, ret);
			break;
		}
	}

	return ret;
}

/*
 * calls iterate() for every inode that references the extent identified by
 * the given parameters.
 * when the iterator function returns a non-zero value, iteration stops.
 */
int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
			  u64 extent_item_objectid, u64 extent_item_pos,
			  int search_commit_root,
			  iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	struct list_head data_refs = LIST_HEAD_INIT(data_refs);
	struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
	struct btrfs_trans_handle *trans;
	struct ulist *refs = NULL;
	struct ulist *roots = NULL;
	struct ulist_node *ref_node = NULL;
	struct ulist_node *root_node = NULL;
	struct seq_list seq_elem = {};
	struct seq_list tree_mod_seq_elem = {};
	struct ulist_iterator ref_uiter;
	struct ulist_iterator root_uiter;
	struct btrfs_delayed_ref_root *delayed_refs = NULL;

	pr_debug("resolving all inodes for extent %llu\n",
		 extent_item_objectid);

	if (search_commit_root) {
		trans = BTRFS_BACKREF_SEARCH_COMMIT_ROOT;
	} else {
		trans = btrfs_join_transaction(fs_info->extent_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		delayed_refs = &trans->transaction->delayed_refs;
		spin_lock(&delayed_refs->lock);
		btrfs_get_delayed_seq(delayed_refs, &seq_elem);
		spin_unlock(&delayed_refs->lock);
		btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
	}

	ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
				   seq_elem.seq, tree_mod_seq_elem.seq, &refs,
				   &extent_item_pos);

	ULIST_ITER_INIT(&ref_uiter);
	while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
		ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
					   seq_elem.seq,
					   tree_mod_seq_elem.seq, &roots);
		if (ret)
			break;
		ULIST_ITER_INIT(&root_uiter);
		while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
			pr_debug("root %llu references leaf %llu, data list "
				 "%#lx\n", root_node->val, ref_node->val,
				 (long)ref_node->aux);
			ret = iterate_leaf_refs(
				(struct extent_inode_elem *)ref_node->aux,
				root_node->val, extent_item_objectid,
				iterate, ctx);
		}
		ulist_free(roots);
		roots = NULL;
	}

	free_leaf_list(refs);

	if (!search_commit_root) {
		btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
		btrfs_put_delayed_seq(delayed_refs, &seq_elem);
		btrfs_end_transaction(trans, fs_info->extent_root);
	}

	return ret;
}

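/*
 * convenience wrapper: look up the extent item covering a logical address
 * (data extents only) and iterate all inodes that reference it.
 */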
int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
				struct btrfs_path *path,
				iterate_extent_inodes_t *iterate, void *ctx)
{
	int ret;
	u64 extent_item_pos;
	struct btrfs_key found_key;
	int search_commit_root = path->search_commit_root;

	ret = extent_from_logical(fs_info, logical, path,
				  &found_key);
	btrfs_release_path(path);
	if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	extent_item_pos = logical - found_key.objectid;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, search_commit_root,
				    iterate, ctx);

	return ret;
}

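/*
 * walk all INODE_REF items of an inode and call iterate() for each; the path
 * is released before the callback is invoked.
 */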
static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
			 struct btrfs_path *path,
			 iterate_irefs_t *iterate, void *ctx)
{
	int ret = 0;
	int slot;
	u32 cur;
	u32 len;
	u32 name_len;
	u64 parent = 0;
	int found = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_key found_key;

	while (!ret) {
		path->leave_spinning = 1;
		ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
				     &found_key);
		if (ret < 0)
			break;
		if (ret) {
			ret = found ? 0 : -ENOENT;
			break;
		}
		++found;

		parent = found_key.offset;
		slot = path->slots[0];
		eb = path->nodes[0];
		/* make sure we can use eb after releasing the path */
		atomic_inc(&eb->refs);
		btrfs_tree_read_lock(eb);
		btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
		btrfs_release_path(path);

		item = btrfs_item_nr(eb, slot);
		iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);

		for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
			name_len = btrfs_inode_ref_name_len(eb, iref);
			/* path must be released before calling iterate()! */
			pr_debug("following ref at offset %u for inode %llu in "
				 "tree %llu\n", cur,
				 (unsigned long long)found_key.objectid,
				 (unsigned long long)fs_root->objectid);
			ret = iterate(parent, iref, eb, ctx);
			if (ret)
				break;
			len = sizeof(*iref) + name_len;
			iref = (struct btrfs_inode_ref *)((char *)iref + len);
		}
		btrfs_tree_read_unlock_blocking(eb);
		free_extent_buffer(eb);
	}

	btrfs_release_path(path);

	return ret;
}

/*
 * returns 0 if the path could be dumped (probably truncated)
 * returns <0 in case of an error
 */
static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
			 struct extent_buffer *eb, void *ctx)
{
	struct inode_fs_paths *ipath = ctx;
	char *fspath;
	char *fspath_min;
	int i = ipath->fspath->elem_cnt;
	const int s_ptr = sizeof(char *);
	u32 bytes_left;

	bytes_left = ipath->fspath->bytes_left > s_ptr ?
					ipath->fspath->bytes_left - s_ptr : 0;

	fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
	fspath = iref_to_path(ipath->fs_root, ipath->btrfs_path, iref, eb,
			      inum, fspath_min, bytes_left);
	if (IS_ERR(fspath))
		return PTR_ERR(fspath);

	if (fspath > fspath_min) {
		pr_debug("path resolved: %s\n", fspath);
		ipath->fspath->val[i] = (u64)(unsigned long)fspath;
		++ipath->fspath->elem_cnt;
		ipath->fspath->bytes_left = fspath - fspath_min;
	} else {
		pr_debug("missed path, not enough space. missing bytes: %lu, "
			 "constructed so far: %s\n",
			 (unsigned long)(fspath_min - fspath), fspath_min);
		++ipath->fspath->elem_missed;
		ipath->fspath->bytes_missing += fspath_min - fspath;
		ipath->fspath->bytes_left = 0;
	}

	return 0;
}

/*
 * this dumps all file system paths to the inode into the ipath struct, provided
 * it has been created large enough. each path is zero-terminated and accessed
 * from ipath->fspath->val[i].
 * when it returns, there are ipath->fspath->elem_cnt number of paths available
 * in ipath->fspath->val[]. when the allocated space wasn't sufficient, the
 * number of missed paths is recorded in ipath->fspath->elem_missed, otherwise,
 * it's zero. ipath->fspath->bytes_missing holds the number of bytes that would
 * have been needed to return all paths.
 */
int paths_from_inode(u64 inum, struct inode_fs_paths *ipath)
{
	return iterate_irefs(inum, ipath->fs_root, ipath->btrfs_path,
			     inode_to_path, ipath);
}

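/*
 * allocate a btrfs_data_container of at least sizeof(*data) bytes and record
 * how much of the requested space is usable (bytes_left) or was missing
 * (bytes_missing).
 */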
struct btrfs_data_container *init_data_container(u32 total_bytes)
{
	struct btrfs_data_container *data;
	size_t alloc_bytes;

	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
	data = kmalloc(alloc_bytes, GFP_NOFS);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (total_bytes >= sizeof(*data)) {
		data->bytes_left = total_bytes - sizeof(*data);
		data->bytes_missing = 0;
	} else {
		data->bytes_missing = sizeof(*data) - total_bytes;
		data->bytes_left = 0;
	}

	data->elem_cnt = 0;
	data->elem_missed = 0;

	return data;
}

/*
 * allocates space to return multiple file system paths for an inode.
 * total_bytes to allocate are passed, note that space usable for actual path
 * information will be total_bytes - sizeof(struct inode_fs_paths).
 * the returned pointer must be freed with free_ipath() in the end.
 */
struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
				  struct btrfs_path *path)
{
	struct inode_fs_paths *ifp;
	struct btrfs_data_container *fspath;

	fspath = init_data_container(total_bytes);
	if (IS_ERR(fspath))
		return (void *)fspath;

	ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
	if (!ifp) {
		kfree(fspath);
		return ERR_PTR(-ENOMEM);
	}

	ifp->btrfs_path = path;
	ifp->fspath = fspath;
	ifp->fs_root = fs_root;

	return ifp;
}

void free_ipath(struct inode_fs_paths *ipath)
{
	kfree(ipath->fspath);
	kfree(ipath);
}

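/*
 * A minimal usage sketch for the ipath helpers above (illustrative only; the
 * 4096-byte size is arbitrary and fs_root/path/inum come from the caller):
 *
 *	struct inode_fs_paths *ipath;
 *	int i;
 *
 *	ipath = init_ipath(4096, fs_root, path);
 *	if (IS_ERR(ipath))
 *		return PTR_ERR(ipath);
 *	ret = paths_from_inode(inum, ipath);
 *	for (i = 0; i < ipath->fspath->elem_cnt; ++i)
 *		pr_debug("path %d: %s\n", i,
 *			 (char *)(unsigned long)ipath->fspath->val[i]);
 *	free_ipath(ipath);
 */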