/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
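/*
 * Illustrative usage note (not part of the original source): paths come from
 * the btrfs_path slab cache and must be released with btrfs_free_path().
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	...
 *	btrfs_free_path(path);
 */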
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
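/*
 * Illustrative note (not part of the original source): btrfs_set_path_blocking
 * and btrfs_clear_path_blocking are used as a pair around work that may sleep,
 * turning the spinning tree locks held by a path into blocking ones and back.
 */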
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
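/*
 * Illustrative note (not part of the original source): a MOD_LOG_MOVE_KEYS
 * element records that move.nr_items key pointers were shifted to
 * move.dst_slot inside the block identified by 'index', so a rewind can
 * simply move them back the other way.
 */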
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	u64 seq;

	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	seq = btrfs_inc_tree_mod_seq(fs_info);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return seq;
}
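/*
 * Illustrative usage note (not part of the original source): a reader that
 * wants a stable view of the tree mod log brackets its work with the
 * get/put pair, with elem->seq starting at zero:
 *
 *	struct seq_list elem = {};
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */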
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}
/*
 * key order of the log:
 *  index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm || !tm->seq);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else {
			kfree(tm);
			return -EEXIST;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
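/*
 * Illustrative note (not part of the original source): since equal
 * (index, seq) pairs are rejected with -EEXIST, every logged operation gets a
 * unique position in the rb tree, keyed first by block, then by sequence.
 */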
/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/*
		 * someone emptied the list while we were waiting for the lock.
		 * we must not add to the list when no blocker exists.
		 */
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}
/*
 * This allocates memory and gets a tree modification sequence number.
 *
 * Returns <0 on error.
 * Returns >0 (the added sequence number) on success.
 */
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
				 struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;

	/*
	 * once we switch from spin locks to something different, we should
	 * honor the flags parameter here.
	 */
	tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
	if (!tm)
		return -ENOMEM;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);
	return tm->seq;
}
static inline int
__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot,
			  enum mod_log_op op, gfp_t flags)
{
	int ret;
	struct tree_mod_elem *tm;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}
static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	int ret;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);

	tree_mod_log_write_unlock(fs_info);
	return ret;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
			int slot, enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}
static noinline int
tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *eb, int slot,
			       enum mod_log_op op)
{
	return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
}
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
				MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	tree_mod_log_write_unlock(fs_info);
	return ret;
}
static noinline void
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	int i;
	u32 nritems;
	int ret;

	if (btrfs_header_level(eb) == 0)
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
				MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm;
	int ret;

	if (tree_mod_dont_log(fs_info, NULL))
		return 0;

	if (log_removal)
		__tree_mod_log_free_eb(fs_info, old_root);

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	tree_mod_log_write_unlock(fs_info);
	return ret;
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
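/*
 * Illustrative note (not part of the original source): the two wrappers above
 * only differ in the 'smallest' flag handed to __tree_mod_log_search, i.e.
 * whether the oldest or the newest element at or above min_seq is wanted for
 * the given block.
 */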
static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, NULL))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
		tree_mod_log_write_unlock(fs_info);
		return;
	}

	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, src,
						     i + src_offset,
						     MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key_locked(fs_info, dst,
						     i + dst_offset,
						     MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}

	tree_mod_log_write_unlock(fs_info);
}
static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline void
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	if (tree_mod_dont_log(fs_info, eb))
		return;

	__tree_mod_log_free_eb(fs_info, eb);

	tree_mod_log_write_unlock(fs_info);
}
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is been relocating
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		tree_mod_log_free_eb(root->fs_info, buf);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operation for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewinded (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
		      struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	btrfs_set_header_nritems(eb, n);
}
/*
 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		    u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
	}

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_unlock(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	u32 blocksize;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		blocksize = btrfs_level_size(root, old_root->level);
		old = read_tree_block(root, logical, blocksize, 0);
		if (!old) {
			pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
				logical);
			WARN_ON(1);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
	} else {
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     (unsigned long long)trans->transid,
		     (unsigned long long)
		     root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     (unsigned long long)trans->transid,
		     (unsigned long long)root->fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
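/*
 * Illustrative note (not part of the original source): two blocks count as
 * "close" when the gap between them is under 32KiB in either direction.
 * For example, with a 4KiB blocksize, blocks at 1MiB and 1MiB + 16KiB are
 * close, while blocks a full 1MiB apart are not.
 */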
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
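/*
 * Illustrative note (not part of the original source): keys compare like a
 * memcmp of (objectid, type, offset), so (256, type 84, 0) sorts before
 * (256, type 108, 0), and both sort before anything with objectid 257.
 */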
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur)
					return -EIO;
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
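/*
 * Illustrative note (not part of the original source): leaf items grow from
 * the front of the block while their data grows from the back, so the "data
 * end" returned here is the offset of the last item's data, or the full leaf
 * data size when the leaf is empty.
 */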
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
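/*
 * Illustrative note (not part of the original source): the only difference
 * between the leaf and node cases above is the element stride handed to
 * generic_bin_search: leaves are searched over struct btrfs_item entries,
 * nodes over struct btrfs_key_ptr entries.
 */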
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	return read_tree_block(root, btrfs_node_blockptr(parent, slot),
		       btrfs_level_size(root, level - 1),
		       btrfs_node_ptr_generation(parent, slot));
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave an node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child, 1);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret) {
				wret = 1;
			} else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret) {
				wret = 1;
			} else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
2286 * helper function for btrfs_search_slot. The goal is to find a block
2287 * in cache without setting the path to blocking. If we find the block
2288 * we return zero and the path is unchanged.
2290 * If we can't find the block, we set the path blocking and do some
2291 * reada. -EAGAIN is returned and the search must be repeated.
2294 read_block_for_search(struct btrfs_trans_handle
*trans
,
2295 struct btrfs_root
*root
, struct btrfs_path
*p
,
2296 struct extent_buffer
**eb_ret
, int level
, int slot
,
2297 struct btrfs_key
*key
, u64 time_seq
)
2302 struct extent_buffer
*b
= *eb_ret
;
2303 struct extent_buffer
*tmp
;
2306 blocknr
= btrfs_node_blockptr(b
, slot
);
2307 gen
= btrfs_node_ptr_generation(b
, slot
);
2308 blocksize
= btrfs_level_size(root
, level
- 1);
2310 tmp
= btrfs_find_tree_block(root
, blocknr
, blocksize
);
2312 /* first we do an atomic uptodate check */
2313 if (btrfs_buffer_uptodate(tmp
, 0, 1) > 0) {
2314 if (btrfs_buffer_uptodate(tmp
, gen
, 1) > 0) {
2316 * we found an up to date block without
2323 /* the pages were up to date, but we failed
2324 * the generation number check. Do a full
2325 * read for the generation number that is correct.
2326 * We must do this without dropping locks so
2327 * we can trust our generation number
2329 free_extent_buffer(tmp
);
2330 btrfs_set_path_blocking(p
);
2332 /* now we're allowed to do a blocking uptodate check */
2333 tmp
= read_tree_block(root
, blocknr
, blocksize
, gen
);
2334 if (tmp
&& btrfs_buffer_uptodate(tmp
, gen
, 0) > 0) {
2338 free_extent_buffer(tmp
);
2339 btrfs_release_path(p
);
2345 * reduce lock contention at high levels
2346 * of the btree by dropping locks before
2347 * we read. Don't release the lock on the current
2348 * level because we need to walk this node to figure
2349 * out which blocks to read.
2351 btrfs_unlock_up_safe(p
, level
+ 1);
2352 btrfs_set_path_blocking(p
);
2354 free_extent_buffer(tmp
);
2356 reada_for_search(root
, p
, level
, slot
, key
->objectid
);
2358 btrfs_release_path(p
);
2361 tmp
= read_tree_block(root
, blocknr
, blocksize
, 0);
2364 * If the read above didn't mark this buffer up to date,
2365 * it will never end up being up to date. Set ret to EIO now
2366 * and give up so that our caller doesn't loop forever
2369 if (!btrfs_buffer_uptodate(tmp
, 0, 0))
2371 free_extent_buffer(tmp
);
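/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * condenses the slow path of read_block_for_search() above into a single
 * helper so the generation check is easy to see: read the child block and
 * reject it if it does not carry the generation recorded in the parent's
 * pointer.  The helper name is hypothetical.
 */
static inline struct extent_buffer *
example_read_child_block(struct btrfs_root *root, struct extent_buffer *parent,
			 int slot)
{
	u64 blocknr = btrfs_node_blockptr(parent, slot);
	u64 gen = btrfs_node_ptr_generation(parent, slot);
	u32 blocksize = btrfs_level_size(root, btrfs_header_level(parent) - 1);
	struct extent_buffer *eb;

	/* blocking read; 'gen' lets the read path verify the transid */
	eb = read_tree_block(root, blocknr, blocksize, gen);
	if (eb && btrfs_buffer_uptodate(eb, gen, 0) <= 0) {
		/* wrong generation or read failure: don't hand it back */
		free_extent_buffer(eb);
		eb = NULL;
	}
	return eb;
}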
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must loop.
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
2392 if ((p
->search_for_split
|| ins_len
> 0) && btrfs_header_nritems(b
) >=
2393 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3) {
2396 if (*write_lock_level
< level
+ 1) {
2397 *write_lock_level
= level
+ 1;
2398 btrfs_release_path(p
);
2402 sret
= reada_for_balance(root
, p
, level
);
2406 btrfs_set_path_blocking(p
);
2407 sret
= split_node(trans
, root
, p
, level
);
2408 btrfs_clear_path_blocking(p
, NULL
, 0);
2415 b
= p
->nodes
[level
];
2416 } else if (ins_len
< 0 && btrfs_header_nritems(b
) <
2417 BTRFS_NODEPTRS_PER_BLOCK(root
) / 2) {
2420 if (*write_lock_level
< level
+ 1) {
2421 *write_lock_level
= level
+ 1;
2422 btrfs_release_path(p
);
2426 sret
= reada_for_balance(root
, p
, level
);
2430 btrfs_set_path_blocking(p
);
2431 sret
= balance_level(trans
, root
, p
, level
);
2432 btrfs_clear_path_blocking(p
, NULL
, 0);
2438 b
= p
->nodes
[level
];
2440 btrfs_release_path(p
);
2443 BUG_ON(btrfs_header_nritems(b
) == 1);
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
2470 struct extent_buffer
*b
;
2475 int lowest_unlock
= 1;
2477 /* everything at write_lock_level or lower must be write locked */
2478 int write_lock_level
= 0;
2479 u8 lowest_level
= 0;
2480 int min_write_lock_level
;
2482 lowest_level
= p
->lowest_level
;
2483 WARN_ON(lowest_level
&& ins_len
> 0);
2484 WARN_ON(p
->nodes
[0] != NULL
);
2489 /* when we are removing items, we might have to go up to level
2490 * two as we update tree pointers Make sure we keep write
2491 * for those levels as well
2493 write_lock_level
= 2;
2494 } else if (ins_len
> 0) {
2496 * for inserting items, make sure we have a write lock on
2497 * level 1 so we can update keys
2499 write_lock_level
= 1;
2503 write_lock_level
= -1;
2505 if (cow
&& (p
->keep_locks
|| p
->lowest_level
))
2506 write_lock_level
= BTRFS_MAX_LEVEL
;
2508 min_write_lock_level
= write_lock_level
;
2512 * we try very hard to do read locks on the root
2514 root_lock
= BTRFS_READ_LOCK
;
2516 if (p
->search_commit_root
) {
2518 * the commit roots are read only
2519 * so we always do read locks
2521 b
= root
->commit_root
;
2522 extent_buffer_get(b
);
2523 level
= btrfs_header_level(b
);
2524 if (!p
->skip_locking
)
2525 btrfs_tree_read_lock(b
);
2527 if (p
->skip_locking
) {
2528 b
= btrfs_root_node(root
);
2529 level
= btrfs_header_level(b
);
2531 /* we don't know the level of the root node
2532 * until we actually have it read locked
2534 b
= btrfs_read_lock_root_node(root
);
2535 level
= btrfs_header_level(b
);
2536 if (level
<= write_lock_level
) {
2537 /* whoops, must trade for write lock */
2538 btrfs_tree_read_unlock(b
);
2539 free_extent_buffer(b
);
2540 b
= btrfs_lock_root_node(root
);
2541 root_lock
= BTRFS_WRITE_LOCK
;
2543 /* the level might have changed, check again */
2544 level
= btrfs_header_level(b
);
2548 p
->nodes
[level
] = b
;
2549 if (!p
->skip_locking
)
2550 p
->locks
[level
] = root_lock
;
2553 level
= btrfs_header_level(b
);
2556 * setup the path here so we can release it under lock
2557 * contention with the cow code
2561 * if we don't really need to cow this block
2562 * then we don't want to set the path blocking,
2563 * so we test it here
2565 if (!should_cow_block(trans
, root
, b
))
2568 btrfs_set_path_blocking(p
);
2571 * must have write locks on this node and the
2574 if (level
> write_lock_level
||
2575 (level
+ 1 > write_lock_level
&&
2576 level
+ 1 < BTRFS_MAX_LEVEL
&&
2577 p
->nodes
[level
+ 1])) {
2578 write_lock_level
= level
+ 1;
2579 btrfs_release_path(p
);
2583 err
= btrfs_cow_block(trans
, root
, b
,
2584 p
->nodes
[level
+ 1],
2585 p
->slots
[level
+ 1], &b
);
2592 BUG_ON(!cow
&& ins_len
);
2594 p
->nodes
[level
] = b
;
2595 btrfs_clear_path_blocking(p
, NULL
, 0);
2598 * we have a lock on b and as long as we aren't changing
2599 * the tree, there is no way to for the items in b to change.
2600 * It is safe to drop the lock on our parent before we
2601 * go through the expensive btree search on b.
2603 * If cow is true, then we might be changing slot zero,
2604 * which may require changing the parent. So, we can't
2605 * drop the lock until after we know which slot we're
2609 btrfs_unlock_up_safe(p
, level
+ 1);
2611 ret
= bin_search(b
, key
, level
, &slot
);
2615 if (ret
&& slot
> 0) {
2619 p
->slots
[level
] = slot
;
2620 err
= setup_nodes_for_search(trans
, root
, p
, b
, level
,
2621 ins_len
, &write_lock_level
);
2628 b
= p
->nodes
[level
];
2629 slot
= p
->slots
[level
];
2632 * slot 0 is special, if we change the key
2633 * we have to update the parent pointer
2634 * which means we must have a write lock
2637 if (slot
== 0 && cow
&&
2638 write_lock_level
< level
+ 1) {
2639 write_lock_level
= level
+ 1;
2640 btrfs_release_path(p
);
2644 unlock_up(p
, level
, lowest_unlock
,
2645 min_write_lock_level
, &write_lock_level
);
2647 if (level
== lowest_level
) {
2653 err
= read_block_for_search(trans
, root
, p
,
2654 &b
, level
, slot
, key
, 0);
2662 if (!p
->skip_locking
) {
2663 level
= btrfs_header_level(b
);
2664 if (level
<= write_lock_level
) {
2665 err
= btrfs_try_tree_write_lock(b
);
2667 btrfs_set_path_blocking(p
);
2669 btrfs_clear_path_blocking(p
, b
,
2672 p
->locks
[level
] = BTRFS_WRITE_LOCK
;
2674 err
= btrfs_try_tree_read_lock(b
);
2676 btrfs_set_path_blocking(p
);
2677 btrfs_tree_read_lock(b
);
2678 btrfs_clear_path_blocking(p
, b
,
2681 p
->locks
[level
] = BTRFS_READ_LOCK
;
2683 p
->nodes
[level
] = b
;
2686 p
->slots
[level
] = slot
;
2688 btrfs_leaf_free_space(root
, b
) < ins_len
) {
2689 if (write_lock_level
< 1) {
2690 write_lock_level
= 1;
2691 btrfs_release_path(p
);
2695 btrfs_set_path_blocking(p
);
2696 err
= split_leaf(trans
, root
, key
,
2697 p
, ins_len
, ret
== 0);
2698 btrfs_clear_path_blocking(p
, NULL
, 0);
2706 if (!p
->search_for_split
)
2707 unlock_up(p
, level
, lowest_unlock
,
2708 min_write_lock_level
, &write_lock_level
);
2715 * we don't really know what they plan on doing with the path
2716 * from here on, so for now just mark it as blocking
2718 if (!p
->leave_spinning
)
2719 btrfs_set_path_blocking(p
);
2721 btrfs_release_path(p
);
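/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows the usual read-only calling convention for btrfs_search_slot():
 * no transaction handle, ins_len == 0 and cow == 0.  The helper name and
 * the particular key fields are hypothetical.
 */
static inline int example_lookup_item(struct btrfs_root *root, u64 objectid,
				      u8 type, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0) {
		/* exact match: path->nodes[0]/path->slots[0] point at it */
		struct extent_buffer *leaf = path->nodes[0];
		u32 item_size = btrfs_item_size_nr(leaf, path->slots[0]);

		(void)item_size;	/* a real caller would read the item here */
	}
	/* ret == 1: not found, path points at the insertion slot */
	btrfs_free_path(path);
	return ret;
}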
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
2739 struct extent_buffer
*b
;
2744 int lowest_unlock
= 1;
2745 u8 lowest_level
= 0;
2747 lowest_level
= p
->lowest_level
;
2748 WARN_ON(p
->nodes
[0] != NULL
);
2750 if (p
->search_commit_root
) {
2752 return btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
2756 b
= get_old_root(root
, time_seq
);
2757 level
= btrfs_header_level(b
);
2758 p
->locks
[level
] = BTRFS_READ_LOCK
;
2761 level
= btrfs_header_level(b
);
2762 p
->nodes
[level
] = b
;
2763 btrfs_clear_path_blocking(p
, NULL
, 0);
2766 * we have a lock on b and as long as we aren't changing
2767 * the tree, there is no way to for the items in b to change.
2768 * It is safe to drop the lock on our parent before we
2769 * go through the expensive btree search on b.
2771 btrfs_unlock_up_safe(p
, level
+ 1);
2773 ret
= bin_search(b
, key
, level
, &slot
);
2777 if (ret
&& slot
> 0) {
2781 p
->slots
[level
] = slot
;
2782 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
2784 if (level
== lowest_level
) {
2790 err
= read_block_for_search(NULL
, root
, p
, &b
, level
,
2791 slot
, key
, time_seq
);
2799 level
= btrfs_header_level(b
);
2800 err
= btrfs_try_tree_read_lock(b
);
2802 btrfs_set_path_blocking(p
);
2803 btrfs_tree_read_lock(b
);
2804 btrfs_clear_path_blocking(p
, b
,
2807 b
= tree_mod_log_rewind(root
->fs_info
, b
, time_seq
);
2808 p
->locks
[level
] = BTRFS_READ_LOCK
;
2809 p
->nodes
[level
] = b
;
2811 p
->slots
[level
] = slot
;
2812 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
2818 if (!p
->leave_spinning
)
2819 btrfs_set_path_blocking(p
);
2821 btrfs_release_path(p
);
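/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows how a reader would run the same kind of lookup against an older
 * version of the tree.  The caller is assumed to have pinned a tree mod
 * log sequence number beforehand (e.g. via btrfs_get_tree_mod_seq(); that
 * detail lives outside this function).  The helper name is hypothetical.
 */
static inline int example_old_lookup(struct btrfs_root *root,
				     struct btrfs_key *key, u64 time_seq)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* read-only walk of the tree as it looked at 'time_seq' */
	ret = btrfs_search_old_slot(root, key, path, time_seq);

	btrfs_free_path(path);
	return ret;
}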
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is found,
 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       struct btrfs_key *key, struct btrfs_path *p,
			       int find_higher, int return_any)
2843 struct extent_buffer
*leaf
;
2846 ret
= btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
2850 * a return value of 1 means the path is at the position where the
2851 * item should be inserted. Normally this is the next bigger item,
2852 * but in case the previous item is the last in a leaf, path points
2853 * to the first free slot in the previous leaf, i.e. at an invalid
2859 if (p
->slots
[0] >= btrfs_header_nritems(leaf
)) {
2860 ret
= btrfs_next_leaf(root
, p
);
2866 * no higher item found, return the next
2871 btrfs_release_path(p
);
2875 if (p
->slots
[0] == 0) {
2876 ret
= btrfs_prev_leaf(root
, p
);
2880 p
->slots
[0] = btrfs_header_nritems(leaf
) - 1;
2886 * no lower item found, return the next
2891 btrfs_release_path(p
);
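/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows the typical use of btrfs_search_slot_for_read() when an exact key
 * match is not required: ask for the next higher item and read back the
 * key that was actually found.  The helper name is hypothetical.
 */
static inline int example_find_next_higher(struct btrfs_root *root,
					   struct btrfs_key *key,
					   struct btrfs_key *found)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* find_higher == 1, return_any == 0 */
	ret = btrfs_search_slot_for_read(root, key, path, 1, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);

	/* 0: something found, 1: tree empty, < 0: error */
	btrfs_free_path(path);
	return ret;
}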
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 */
static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
/*
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(root, path, &disk_key, 1);
}
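/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * shows the constraint btrfs_set_item_key_safe() enforces: the new key may
 * only move within the gap left by the neighbouring items, so a typical
 * caller only nudges the offset of the key it already holds.  The helper
 * name is hypothetical.
 */
static inline void example_bump_key_offset(struct btrfs_root *root,
					   struct btrfs_path *path,
					   struct btrfs_key *old_key,
					   u64 new_offset)
{
	struct btrfs_key new_key = *old_key;

	/* keep objectid/type, change only the offset; sort order must hold */
	new_key.offset = new_offset;
	btrfs_set_item_key_safe(root, path, &new_key);
}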
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
2974 src_nritems
= btrfs_header_nritems(src
);
2975 dst_nritems
= btrfs_header_nritems(dst
);
2976 push_items
= BTRFS_NODEPTRS_PER_BLOCK(root
) - dst_nritems
;
2977 WARN_ON(btrfs_header_generation(src
) != trans
->transid
);
2978 WARN_ON(btrfs_header_generation(dst
) != trans
->transid
);
2980 if (!empty
&& src_nritems
<= 8)
2983 if (push_items
<= 0)
2987 push_items
= min(src_nritems
, push_items
);
2988 if (push_items
< src_nritems
) {
2989 /* leave at least 8 pointers in the node if
2990 * we aren't going to empty it
2992 if (src_nritems
- push_items
< 8) {
2993 if (push_items
<= 8)
2999 push_items
= min(src_nritems
- 8, push_items
);
3001 tree_mod_log_eb_copy(root
->fs_info
, dst
, src
, dst_nritems
, 0,
3003 copy_extent_buffer(dst
, src
,
3004 btrfs_node_key_ptr_offset(dst_nritems
),
3005 btrfs_node_key_ptr_offset(0),
3006 push_items
* sizeof(struct btrfs_key_ptr
));
3008 if (push_items
< src_nritems
) {
3010 * don't call tree_mod_log_eb_move here, key removal was already
3011 * fully logged by tree_mod_log_eb_copy above.
3013 memmove_extent_buffer(src
, btrfs_node_key_ptr_offset(0),
3014 btrfs_node_key_ptr_offset(push_items
),
3015 (src_nritems
- push_items
) *
3016 sizeof(struct btrfs_key_ptr
));
3018 btrfs_set_header_nritems(src
, src_nritems
- push_items
);
3019 btrfs_set_header_nritems(dst
, dst_nritems
+ push_items
);
3020 btrfs_mark_buffer_dirty(src
);
3021 btrfs_mark_buffer_dirty(dst
);
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
3046 WARN_ON(btrfs_header_generation(src
) != trans
->transid
);
3047 WARN_ON(btrfs_header_generation(dst
) != trans
->transid
);
3049 src_nritems
= btrfs_header_nritems(src
);
3050 dst_nritems
= btrfs_header_nritems(dst
);
3051 push_items
= BTRFS_NODEPTRS_PER_BLOCK(root
) - dst_nritems
;
3052 if (push_items
<= 0)
3055 if (src_nritems
< 4)
3058 max_push
= src_nritems
/ 2 + 1;
3059 /* don't try to empty the node */
3060 if (max_push
>= src_nritems
)
3063 if (max_push
< push_items
)
3064 push_items
= max_push
;
3066 tree_mod_log_eb_move(root
->fs_info
, dst
, push_items
, 0, dst_nritems
);
3067 memmove_extent_buffer(dst
, btrfs_node_key_ptr_offset(push_items
),
3068 btrfs_node_key_ptr_offset(0),
3070 sizeof(struct btrfs_key_ptr
));
3072 tree_mod_log_eb_copy(root
->fs_info
, dst
, src
, 0,
3073 src_nritems
- push_items
, push_items
);
3074 copy_extent_buffer(dst
, src
,
3075 btrfs_node_key_ptr_offset(0),
3076 btrfs_node_key_ptr_offset(src_nritems
- push_items
),
3077 push_items
* sizeof(struct btrfs_key_ptr
));
3079 btrfs_set_header_nritems(src
, src_nritems
- push_items
);
3080 btrfs_set_header_nritems(dst
, dst_nritems
+ push_items
);
3082 btrfs_mark_buffer_dirty(src
);
3083 btrfs_mark_buffer_dirty(dst
);
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path, int level,
				    int log_removal)
3100 struct extent_buffer
*lower
;
3101 struct extent_buffer
*c
;
3102 struct extent_buffer
*old
;
3103 struct btrfs_disk_key lower_key
;
3105 BUG_ON(path
->nodes
[level
]);
3106 BUG_ON(path
->nodes
[level
-1] != root
->node
);
3108 lower
= path
->nodes
[level
-1];
3110 btrfs_item_key(lower
, &lower_key
, 0);
3112 btrfs_node_key(lower
, &lower_key
, 0);
3114 c
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
3115 root
->root_key
.objectid
, &lower_key
,
3116 level
, root
->node
->start
, 0);
3120 root_add_used(root
, root
->nodesize
);
3122 memset_extent_buffer(c
, 0, 0, sizeof(struct btrfs_header
));
3123 btrfs_set_header_nritems(c
, 1);
3124 btrfs_set_header_level(c
, level
);
3125 btrfs_set_header_bytenr(c
, c
->start
);
3126 btrfs_set_header_generation(c
, trans
->transid
);
3127 btrfs_set_header_backref_rev(c
, BTRFS_MIXED_BACKREF_REV
);
3128 btrfs_set_header_owner(c
, root
->root_key
.objectid
);
3130 write_extent_buffer(c
, root
->fs_info
->fsid
,
3131 (unsigned long)btrfs_header_fsid(c
),
3134 write_extent_buffer(c
, root
->fs_info
->chunk_tree_uuid
,
3135 (unsigned long)btrfs_header_chunk_tree_uuid(c
),
3138 btrfs_set_node_key(c
, &lower_key
, 0);
3139 btrfs_set_node_blockptr(c
, 0, lower
->start
);
3140 lower_gen
= btrfs_header_generation(lower
);
3141 WARN_ON(lower_gen
!= trans
->transid
);
3143 btrfs_set_node_ptr_generation(c
, 0, lower_gen
);
3145 btrfs_mark_buffer_dirty(c
);
3148 tree_mod_log_set_root_pointer(root
, c
, log_removal
);
3149 rcu_assign_pointer(root
->node
, c
);
3151 /* the super has an extra ref to root->node */
3152 free_extent_buffer(old
);
3154 add_root_to_dirty_list(root
);
3155 extent_buffer_get(c
);
3156 path
->nodes
[level
] = c
;
3157 path
->locks
[level
] = BTRFS_WRITE_LOCK
;
3158 path
->slots
[level
] = 0;
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
3174 struct extent_buffer
*lower
;
3178 BUG_ON(!path
->nodes
[level
]);
3179 btrfs_assert_tree_locked(path
->nodes
[level
]);
3180 lower
= path
->nodes
[level
];
3181 nritems
= btrfs_header_nritems(lower
);
3182 BUG_ON(slot
> nritems
);
3183 BUG_ON(nritems
== BTRFS_NODEPTRS_PER_BLOCK(root
));
3184 if (slot
!= nritems
) {
3186 tree_mod_log_eb_move(root
->fs_info
, lower
, slot
+ 1,
3187 slot
, nritems
- slot
);
3188 memmove_extent_buffer(lower
,
3189 btrfs_node_key_ptr_offset(slot
+ 1),
3190 btrfs_node_key_ptr_offset(slot
),
3191 (nritems
- slot
) * sizeof(struct btrfs_key_ptr
));
3194 ret
= tree_mod_log_insert_key(root
->fs_info
, lower
, slot
,
3198 btrfs_set_node_key(lower
, key
, slot
);
3199 btrfs_set_node_blockptr(lower
, slot
, bytenr
);
3200 WARN_ON(trans
->transid
== 0);
3201 btrfs_set_node_ptr_generation(lower
, slot
, trans
->transid
);
3202 btrfs_set_header_nritems(lower
, nritems
+ 1);
3203 btrfs_mark_buffer_dirty(lower
);
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
3219 struct extent_buffer
*c
;
3220 struct extent_buffer
*split
;
3221 struct btrfs_disk_key disk_key
;
3226 c
= path
->nodes
[level
];
3227 WARN_ON(btrfs_header_generation(c
) != trans
->transid
);
3228 if (c
== root
->node
) {
3230 * trying to split the root, lets make a new one
3232 * tree mod log: We pass 0 as log_removal parameter to
3233 * insert_new_root, because that root buffer will be kept as a
3234 * normal node. We are going to log removal of half of the
3235 * elements below with tree_mod_log_eb_copy. We're holding a
3236 * tree lock on the buffer, which is why we cannot race with
3237 * other tree_mod_log users.
3239 ret
= insert_new_root(trans
, root
, path
, level
+ 1, 0);
3243 ret
= push_nodes_for_insert(trans
, root
, path
, level
);
3244 c
= path
->nodes
[level
];
3245 if (!ret
&& btrfs_header_nritems(c
) <
3246 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3)
3252 c_nritems
= btrfs_header_nritems(c
);
3253 mid
= (c_nritems
+ 1) / 2;
3254 btrfs_node_key(c
, &disk_key
, mid
);
3256 split
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
3257 root
->root_key
.objectid
,
3258 &disk_key
, level
, c
->start
, 0);
3260 return PTR_ERR(split
);
3262 root_add_used(root
, root
->nodesize
);
3264 memset_extent_buffer(split
, 0, 0, sizeof(struct btrfs_header
));
3265 btrfs_set_header_level(split
, btrfs_header_level(c
));
3266 btrfs_set_header_bytenr(split
, split
->start
);
3267 btrfs_set_header_generation(split
, trans
->transid
);
3268 btrfs_set_header_backref_rev(split
, BTRFS_MIXED_BACKREF_REV
);
3269 btrfs_set_header_owner(split
, root
->root_key
.objectid
);
3270 write_extent_buffer(split
, root
->fs_info
->fsid
,
3271 (unsigned long)btrfs_header_fsid(split
),
3273 write_extent_buffer(split
, root
->fs_info
->chunk_tree_uuid
,
3274 (unsigned long)btrfs_header_chunk_tree_uuid(split
),
3277 tree_mod_log_eb_copy(root
->fs_info
, split
, c
, 0, mid
, c_nritems
- mid
);
3278 copy_extent_buffer(split
, c
,
3279 btrfs_node_key_ptr_offset(0),
3280 btrfs_node_key_ptr_offset(mid
),
3281 (c_nritems
- mid
) * sizeof(struct btrfs_key_ptr
));
3282 btrfs_set_header_nritems(split
, c_nritems
- mid
);
3283 btrfs_set_header_nritems(c
, mid
);
3286 btrfs_mark_buffer_dirty(c
);
3287 btrfs_mark_buffer_dirty(split
);
3289 insert_ptr(trans
, root
, path
, &disk_key
, split
->start
,
3290 path
->slots
[level
+ 1] + 1, level
+ 1);
3292 if (path
->slots
[level
] >= mid
) {
3293 path
->slots
[level
] -= mid
;
3294 btrfs_tree_unlock(c
);
3295 free_extent_buffer(c
);
3296 path
->nodes
[level
] = split
;
3297 path
->slots
[level
+ 1] += 1;
3299 btrfs_tree_unlock(split
);
3300 free_extent_buffer(split
);
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	struct btrfs_map_token token;
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	btrfs_init_map_token(&token);
	start_item = btrfs_item_nr(l, start);
	end_item = btrfs_item_nr(l, end);
	data_len = btrfs_token_item_offset(l, start_item, &token) +
		   btrfs_token_item_size(l, start_item, &token);
	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;

	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
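/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * spells out the accounting used throughout this file: an insertion needs
 * room for its data plus one struct btrfs_item header, both of which come
 * out of btrfs_leaf_free_space().  The helper name is hypothetical.
 */
static inline int example_room_for_one_item(struct btrfs_root *root,
					    struct extent_buffer *leaf,
					    u32 data_size)
{
	int free_space = btrfs_leaf_free_space(root, leaf);

	/* item header + item data must both fit in the middle of the leaf */
	return free_space >= (int)(data_size + sizeof(struct btrfs_item));
}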
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
3364 struct extent_buffer
*left
= path
->nodes
[0];
3365 struct extent_buffer
*upper
= path
->nodes
[1];
3366 struct btrfs_map_token token
;
3367 struct btrfs_disk_key disk_key
;
3372 struct btrfs_item
*item
;
3378 btrfs_init_map_token(&token
);
3383 nr
= max_t(u32
, 1, min_slot
);
3385 if (path
->slots
[0] >= left_nritems
)
3386 push_space
+= data_size
;
3388 slot
= path
->slots
[1];
3389 i
= left_nritems
- 1;
3391 item
= btrfs_item_nr(left
, i
);
3393 if (!empty
&& push_items
> 0) {
3394 if (path
->slots
[0] > i
)
3396 if (path
->slots
[0] == i
) {
3397 int space
= btrfs_leaf_free_space(root
, left
);
3398 if (space
+ push_space
* 2 > free_space
)
3403 if (path
->slots
[0] == i
)
3404 push_space
+= data_size
;
3406 this_item_size
= btrfs_item_size(left
, item
);
3407 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3411 push_space
+= this_item_size
+ sizeof(*item
);
3417 if (push_items
== 0)
3420 WARN_ON(!empty
&& push_items
== left_nritems
);
3422 /* push left to right */
3423 right_nritems
= btrfs_header_nritems(right
);
3425 push_space
= btrfs_item_end_nr(left
, left_nritems
- push_items
);
3426 push_space
-= leaf_data_end(root
, left
);
3428 /* make room in the right data area */
3429 data_end
= leaf_data_end(root
, right
);
3430 memmove_extent_buffer(right
,
3431 btrfs_leaf_data(right
) + data_end
- push_space
,
3432 btrfs_leaf_data(right
) + data_end
,
3433 BTRFS_LEAF_DATA_SIZE(root
) - data_end
);
3435 /* copy from the left data area */
3436 copy_extent_buffer(right
, left
, btrfs_leaf_data(right
) +
3437 BTRFS_LEAF_DATA_SIZE(root
) - push_space
,
3438 btrfs_leaf_data(left
) + leaf_data_end(root
, left
),
3441 memmove_extent_buffer(right
, btrfs_item_nr_offset(push_items
),
3442 btrfs_item_nr_offset(0),
3443 right_nritems
* sizeof(struct btrfs_item
));
3445 /* copy the items from left to right */
3446 copy_extent_buffer(right
, left
, btrfs_item_nr_offset(0),
3447 btrfs_item_nr_offset(left_nritems
- push_items
),
3448 push_items
* sizeof(struct btrfs_item
));
3450 /* update the item pointers */
3451 right_nritems
+= push_items
;
3452 btrfs_set_header_nritems(right
, right_nritems
);
3453 push_space
= BTRFS_LEAF_DATA_SIZE(root
);
3454 for (i
= 0; i
< right_nritems
; i
++) {
3455 item
= btrfs_item_nr(right
, i
);
3456 push_space
-= btrfs_token_item_size(right
, item
, &token
);
3457 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3460 left_nritems
-= push_items
;
3461 btrfs_set_header_nritems(left
, left_nritems
);
3464 btrfs_mark_buffer_dirty(left
);
3466 clean_tree_block(trans
, root
, left
);
3468 btrfs_mark_buffer_dirty(right
);
3470 btrfs_item_key(right
, &disk_key
, 0);
3471 btrfs_set_node_key(upper
, &disk_key
, slot
+ 1);
3472 btrfs_mark_buffer_dirty(upper
);
3474 /* then fixup the leaf pointer in the path */
3475 if (path
->slots
[0] >= left_nritems
) {
3476 path
->slots
[0] -= left_nritems
;
3477 if (btrfs_header_nritems(path
->nodes
[0]) == 0)
3478 clean_tree_block(trans
, root
, path
->nodes
[0]);
3479 btrfs_tree_unlock(path
->nodes
[0]);
3480 free_extent_buffer(path
->nodes
[0]);
3481 path
->nodes
[0] = right
;
3482 path
->slots
[1] += 1;
3484 btrfs_tree_unlock(right
);
3485 free_extent_buffer(right
);
3490 btrfs_tree_unlock(right
);
3491 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
3510 struct extent_buffer
*left
= path
->nodes
[0];
3511 struct extent_buffer
*right
;
3512 struct extent_buffer
*upper
;
3518 if (!path
->nodes
[1])
3521 slot
= path
->slots
[1];
3522 upper
= path
->nodes
[1];
3523 if (slot
>= btrfs_header_nritems(upper
) - 1)
3526 btrfs_assert_tree_locked(path
->nodes
[1]);
3528 right
= read_node_slot(root
, upper
, slot
+ 1);
3532 btrfs_tree_lock(right
);
3533 btrfs_set_lock_blocking(right
);
3535 free_space
= btrfs_leaf_free_space(root
, right
);
3536 if (free_space
< data_size
)
3539 /* cow and double check */
3540 ret
= btrfs_cow_block(trans
, root
, right
, upper
,
3545 free_space
= btrfs_leaf_free_space(root
, right
);
3546 if (free_space
< data_size
)
3549 left_nritems
= btrfs_header_nritems(left
);
3550 if (left_nritems
== 0)
3553 return __push_leaf_right(trans
, root
, path
, min_data_size
, empty
,
3554 right
, free_space
, left_nritems
, min_slot
);
3556 btrfs_tree_unlock(right
);
3557 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
3576 struct btrfs_disk_key disk_key
;
3577 struct extent_buffer
*right
= path
->nodes
[0];
3581 struct btrfs_item
*item
;
3582 u32 old_left_nritems
;
3586 u32 old_left_item_size
;
3587 struct btrfs_map_token token
;
3589 btrfs_init_map_token(&token
);
3592 nr
= min(right_nritems
, max_slot
);
3594 nr
= min(right_nritems
- 1, max_slot
);
3596 for (i
= 0; i
< nr
; i
++) {
3597 item
= btrfs_item_nr(right
, i
);
3599 if (!empty
&& push_items
> 0) {
3600 if (path
->slots
[0] < i
)
3602 if (path
->slots
[0] == i
) {
3603 int space
= btrfs_leaf_free_space(root
, right
);
3604 if (space
+ push_space
* 2 > free_space
)
3609 if (path
->slots
[0] == i
)
3610 push_space
+= data_size
;
3612 this_item_size
= btrfs_item_size(right
, item
);
3613 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3617 push_space
+= this_item_size
+ sizeof(*item
);
3620 if (push_items
== 0) {
3624 if (!empty
&& push_items
== btrfs_header_nritems(right
))
3627 /* push data from right to left */
3628 copy_extent_buffer(left
, right
,
3629 btrfs_item_nr_offset(btrfs_header_nritems(left
)),
3630 btrfs_item_nr_offset(0),
3631 push_items
* sizeof(struct btrfs_item
));
3633 push_space
= BTRFS_LEAF_DATA_SIZE(root
) -
3634 btrfs_item_offset_nr(right
, push_items
- 1);
3636 copy_extent_buffer(left
, right
, btrfs_leaf_data(left
) +
3637 leaf_data_end(root
, left
) - push_space
,
3638 btrfs_leaf_data(right
) +
3639 btrfs_item_offset_nr(right
, push_items
- 1),
3641 old_left_nritems
= btrfs_header_nritems(left
);
3642 BUG_ON(old_left_nritems
<= 0);
3644 old_left_item_size
= btrfs_item_offset_nr(left
, old_left_nritems
- 1);
3645 for (i
= old_left_nritems
; i
< old_left_nritems
+ push_items
; i
++) {
3648 item
= btrfs_item_nr(left
, i
);
3650 ioff
= btrfs_token_item_offset(left
, item
, &token
);
3651 btrfs_set_token_item_offset(left
, item
,
3652 ioff
- (BTRFS_LEAF_DATA_SIZE(root
) - old_left_item_size
),
3655 btrfs_set_header_nritems(left
, old_left_nritems
+ push_items
);
3657 /* fixup right node */
3658 if (push_items
> right_nritems
)
3659 WARN(1, KERN_CRIT
"push items %d nr %u\n", push_items
,
3662 if (push_items
< right_nritems
) {
3663 push_space
= btrfs_item_offset_nr(right
, push_items
- 1) -
3664 leaf_data_end(root
, right
);
3665 memmove_extent_buffer(right
, btrfs_leaf_data(right
) +
3666 BTRFS_LEAF_DATA_SIZE(root
) - push_space
,
3667 btrfs_leaf_data(right
) +
3668 leaf_data_end(root
, right
), push_space
);
3670 memmove_extent_buffer(right
, btrfs_item_nr_offset(0),
3671 btrfs_item_nr_offset(push_items
),
3672 (btrfs_header_nritems(right
) - push_items
) *
3673 sizeof(struct btrfs_item
));
3675 right_nritems
-= push_items
;
3676 btrfs_set_header_nritems(right
, right_nritems
);
3677 push_space
= BTRFS_LEAF_DATA_SIZE(root
);
3678 for (i
= 0; i
< right_nritems
; i
++) {
3679 item
= btrfs_item_nr(right
, i
);
3681 push_space
= push_space
- btrfs_token_item_size(right
,
3683 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3686 btrfs_mark_buffer_dirty(left
);
3688 btrfs_mark_buffer_dirty(right
);
3690 clean_tree_block(trans
, root
, right
);
3692 btrfs_item_key(right
, &disk_key
, 0);
3693 fixup_low_keys(root
, path
, &disk_key
, 1);
3695 /* then fixup the leaf pointer in the path */
3696 if (path
->slots
[0] < push_items
) {
3697 path
->slots
[0] += old_left_nritems
;
3698 btrfs_tree_unlock(path
->nodes
[0]);
3699 free_extent_buffer(path
->nodes
[0]);
3700 path
->nodes
[0] = left
;
3701 path
->slots
[1] -= 1;
3703 btrfs_tree_unlock(left
);
3704 free_extent_buffer(left
);
3705 path
->slots
[0] -= push_items
;
3707 BUG_ON(path
->slots
[0] < 0);
3710 btrfs_tree_unlock(left
);
3711 free_extent_buffer(left
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
3727 struct extent_buffer
*right
= path
->nodes
[0];
3728 struct extent_buffer
*left
;
3734 slot
= path
->slots
[1];
3737 if (!path
->nodes
[1])
3740 right_nritems
= btrfs_header_nritems(right
);
3741 if (right_nritems
== 0)
3744 btrfs_assert_tree_locked(path
->nodes
[1]);
3746 left
= read_node_slot(root
, path
->nodes
[1], slot
- 1);
3750 btrfs_tree_lock(left
);
3751 btrfs_set_lock_blocking(left
);
3753 free_space
= btrfs_leaf_free_space(root
, left
);
3754 if (free_space
< data_size
) {
3759 /* cow and double check */
3760 ret
= btrfs_cow_block(trans
, root
, left
,
3761 path
->nodes
[1], slot
- 1, &left
);
3763 /* we hit -ENOSPC, but it isn't fatal here */
3769 free_space
= btrfs_leaf_free_space(root
, left
);
3770 if (free_space
< data_size
) {
3775 return __push_leaf_left(trans
, root
, path
, min_data_size
,
3776 empty
, left
, free_space
, right_nritems
,
3779 btrfs_tree_unlock(left
);
3780 free_extent_buffer(left
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
3798 struct btrfs_disk_key disk_key
;
3799 struct btrfs_map_token token
;
3801 btrfs_init_map_token(&token
);
3803 nritems
= nritems
- mid
;
3804 btrfs_set_header_nritems(right
, nritems
);
3805 data_copy_size
= btrfs_item_end_nr(l
, mid
) - leaf_data_end(root
, l
);
3807 copy_extent_buffer(right
, l
, btrfs_item_nr_offset(0),
3808 btrfs_item_nr_offset(mid
),
3809 nritems
* sizeof(struct btrfs_item
));
3811 copy_extent_buffer(right
, l
,
3812 btrfs_leaf_data(right
) + BTRFS_LEAF_DATA_SIZE(root
) -
3813 data_copy_size
, btrfs_leaf_data(l
) +
3814 leaf_data_end(root
, l
), data_copy_size
);
3816 rt_data_off
= BTRFS_LEAF_DATA_SIZE(root
) -
3817 btrfs_item_end_nr(l
, mid
);
3819 for (i
= 0; i
< nritems
; i
++) {
3820 struct btrfs_item
*item
= btrfs_item_nr(right
, i
);
3823 ioff
= btrfs_token_item_offset(right
, item
, &token
);
3824 btrfs_set_token_item_offset(right
, item
,
3825 ioff
+ rt_data_off
, &token
);
3828 btrfs_set_header_nritems(l
, mid
);
3829 btrfs_item_key(right
, &disk_key
, 0);
3830 insert_ptr(trans
, root
, path
, &disk_key
, right
->start
,
3831 path
->slots
[1] + 1, 1);
3833 btrfs_mark_buffer_dirty(right
);
3834 btrfs_mark_buffer_dirty(l
);
3835 BUG_ON(path
->slots
[0] != slot
);
3838 btrfs_tree_unlock(path
->nodes
[0]);
3839 free_extent_buffer(path
->nodes
[0]);
3840 path
->nodes
[0] = right
;
3841 path
->slots
[0] -= mid
;
3842 path
->slots
[1] += 1;
3844 btrfs_tree_unlock(right
);
3845 free_extent_buffer(right
);
3848 BUG_ON(path
->slots
[0] < 0);
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
3871 slot
= path
->slots
[0];
3874 * try to push all the items after our slot into the
3877 ret
= push_leaf_right(trans
, root
, path
, 1, data_size
, 0, slot
);
3884 nritems
= btrfs_header_nritems(path
->nodes
[0]);
3886 * our goal is to get our slot at the start or end of a leaf. If
3887 * we've done so we're done
3889 if (path
->slots
[0] == 0 || path
->slots
[0] == nritems
)
3892 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= data_size
)
3895 /* try to push all the items before our slot into the next leaf */
3896 slot
= path
->slots
[0];
3897 ret
= push_leaf_left(trans
, root
, path
, 1, data_size
, 0, slot
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
3921 struct btrfs_disk_key disk_key
;
3922 struct extent_buffer
*l
;
3926 struct extent_buffer
*right
;
3930 int num_doubles
= 0;
3931 int tried_avoid_double
= 0;
3934 slot
= path
->slots
[0];
3935 if (extend
&& data_size
+ btrfs_item_size_nr(l
, slot
) +
3936 sizeof(struct btrfs_item
) > BTRFS_LEAF_DATA_SIZE(root
))
3939 /* first try to make some room by pushing left and right */
3941 wret
= push_leaf_right(trans
, root
, path
, data_size
,
3946 wret
= push_leaf_left(trans
, root
, path
, data_size
,
3947 data_size
, 0, (u32
)-1);
3953 /* did the pushes work? */
3954 if (btrfs_leaf_free_space(root
, l
) >= data_size
)
3958 if (!path
->nodes
[1]) {
3959 ret
= insert_new_root(trans
, root
, path
, 1, 1);
3966 slot
= path
->slots
[0];
3967 nritems
= btrfs_header_nritems(l
);
3968 mid
= (nritems
+ 1) / 2;
3972 leaf_space_used(l
, mid
, nritems
- mid
) + data_size
>
3973 BTRFS_LEAF_DATA_SIZE(root
)) {
3974 if (slot
>= nritems
) {
3978 if (mid
!= nritems
&&
3979 leaf_space_used(l
, mid
, nritems
- mid
) +
3980 data_size
> BTRFS_LEAF_DATA_SIZE(root
)) {
3981 if (data_size
&& !tried_avoid_double
)
3982 goto push_for_double
;
3988 if (leaf_space_used(l
, 0, mid
) + data_size
>
3989 BTRFS_LEAF_DATA_SIZE(root
)) {
3990 if (!extend
&& data_size
&& slot
== 0) {
3992 } else if ((extend
|| !data_size
) && slot
== 0) {
3996 if (mid
!= nritems
&&
3997 leaf_space_used(l
, mid
, nritems
- mid
) +
3998 data_size
> BTRFS_LEAF_DATA_SIZE(root
)) {
3999 if (data_size
&& !tried_avoid_double
)
4000 goto push_for_double
;
4008 btrfs_cpu_key_to_disk(&disk_key
, ins_key
);
4010 btrfs_item_key(l
, &disk_key
, mid
);
4012 right
= btrfs_alloc_free_block(trans
, root
, root
->leafsize
, 0,
4013 root
->root_key
.objectid
,
4014 &disk_key
, 0, l
->start
, 0);
4016 return PTR_ERR(right
);
4018 root_add_used(root
, root
->leafsize
);
4020 memset_extent_buffer(right
, 0, 0, sizeof(struct btrfs_header
));
4021 btrfs_set_header_bytenr(right
, right
->start
);
4022 btrfs_set_header_generation(right
, trans
->transid
);
4023 btrfs_set_header_backref_rev(right
, BTRFS_MIXED_BACKREF_REV
);
4024 btrfs_set_header_owner(right
, root
->root_key
.objectid
);
4025 btrfs_set_header_level(right
, 0);
4026 write_extent_buffer(right
, root
->fs_info
->fsid
,
4027 (unsigned long)btrfs_header_fsid(right
),
4030 write_extent_buffer(right
, root
->fs_info
->chunk_tree_uuid
,
4031 (unsigned long)btrfs_header_chunk_tree_uuid(right
),
4036 btrfs_set_header_nritems(right
, 0);
4037 insert_ptr(trans
, root
, path
, &disk_key
, right
->start
,
4038 path
->slots
[1] + 1, 1);
4039 btrfs_tree_unlock(path
->nodes
[0]);
4040 free_extent_buffer(path
->nodes
[0]);
4041 path
->nodes
[0] = right
;
4043 path
->slots
[1] += 1;
4045 btrfs_set_header_nritems(right
, 0);
4046 insert_ptr(trans
, root
, path
, &disk_key
, right
->start
,
4048 btrfs_tree_unlock(path
->nodes
[0]);
4049 free_extent_buffer(path
->nodes
[0]);
4050 path
->nodes
[0] = right
;
4052 if (path
->slots
[1] == 0)
4053 fixup_low_keys(root
, path
, &disk_key
, 1);
4055 btrfs_mark_buffer_dirty(right
);
4059 copy_for_split(trans
, root
, path
, l
, right
, slot
, mid
, nritems
);
4062 BUG_ON(num_doubles
!= 0);
4070 push_for_double_split(trans
, root
, path
, data_size
);
4071 tried_avoid_double
= 1;
4072 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= data_size
)
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
4081 struct btrfs_key key
;
4082 struct extent_buffer
*leaf
;
4083 struct btrfs_file_extent_item
*fi
;
4088 leaf
= path
->nodes
[0];
4089 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
4091 BUG_ON(key
.type
!= BTRFS_EXTENT_DATA_KEY
&&
4092 key
.type
!= BTRFS_EXTENT_CSUM_KEY
);
4094 if (btrfs_leaf_free_space(root
, leaf
) >= ins_len
)
4097 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4098 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4099 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4100 struct btrfs_file_extent_item
);
4101 extent_len
= btrfs_file_extent_num_bytes(leaf
, fi
);
4103 btrfs_release_path(path
);
4105 path
->keep_locks
= 1;
4106 path
->search_for_split
= 1;
4107 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
4108 path
->search_for_split
= 0;
4113 leaf
= path
->nodes
[0];
4114 /* if our item isn't there or got smaller, return now */
4115 if (ret
> 0 || item_size
!= btrfs_item_size_nr(leaf
, path
->slots
[0]))
4118 /* the leaf has changed, it now has room. return now */
4119 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= ins_len
)
4122 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4123 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4124 struct btrfs_file_extent_item
);
4125 if (extent_len
!= btrfs_file_extent_num_bytes(leaf
, fi
))
4129 btrfs_set_path_blocking(path
);
4130 ret
= split_leaf(trans
, root
, &key
, path
, ins_len
, 1);
4134 path
->keep_locks
= 0;
4135 btrfs_unlock_up_safe(path
, 1);
4138 path
->keep_locks
= 0;
static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *new_key,
			       unsigned long split_offset)
4148 struct extent_buffer
*leaf
;
4149 struct btrfs_item
*item
;
4150 struct btrfs_item
*new_item
;
4156 struct btrfs_disk_key disk_key
;
4158 leaf
= path
->nodes
[0];
4159 BUG_ON(btrfs_leaf_free_space(root
, leaf
) < sizeof(struct btrfs_item
));
4161 btrfs_set_path_blocking(path
);
4163 item
= btrfs_item_nr(leaf
, path
->slots
[0]);
4164 orig_offset
= btrfs_item_offset(leaf
, item
);
4165 item_size
= btrfs_item_size(leaf
, item
);
4167 buf
= kmalloc(item_size
, GFP_NOFS
);
4171 read_extent_buffer(leaf
, buf
, btrfs_item_ptr_offset(leaf
,
4172 path
->slots
[0]), item_size
);
4174 slot
= path
->slots
[0] + 1;
4175 nritems
= btrfs_header_nritems(leaf
);
4176 if (slot
!= nritems
) {
4177 /* shift the items */
4178 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ 1),
4179 btrfs_item_nr_offset(slot
),
4180 (nritems
- slot
) * sizeof(struct btrfs_item
));
4183 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
4184 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4186 new_item
= btrfs_item_nr(leaf
, slot
);
4188 btrfs_set_item_offset(leaf
, new_item
, orig_offset
);
4189 btrfs_set_item_size(leaf
, new_item
, item_size
- split_offset
);
4191 btrfs_set_item_offset(leaf
, item
,
4192 orig_offset
+ item_size
- split_offset
);
4193 btrfs_set_item_size(leaf
, item
, split_offset
);
4195 btrfs_set_header_nritems(leaf
, nritems
+ 1);
4197 /* write the data for the start of the original item */
4198 write_extent_buffer(leaf
, buf
,
4199 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4202 /* write the data for the new item */
4203 write_extent_buffer(leaf
, buf
+ split_offset
,
4204 btrfs_item_ptr_offset(leaf
, slot
),
4205 item_size
- split_offset
);
4206 btrfs_mark_buffer_dirty(leaf
);
4208 BUG_ON(btrfs_leaf_free_space(root
, leaf
) < 0);
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;

	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(trans, root, path, new_key, split_offset);
	return ret;
}
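/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * path is assumed to already point at the item to split (for example via
 * btrfs_search_slot), and setup_leaf_for_split() above only accepts
 * EXTENT_DATA and EXTENT_CSUM keys.  After the call the first split_offset
 * bytes stay under the old key and the tail lives under new_key.  The
 * helper name is hypothetical.
 */
static inline int example_split_in_place(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path,
					 struct btrfs_key *new_key,
					 unsigned long split_offset)
{
	/* both halves end up in the same leaf, which stays locked */
	return btrfs_split_item(trans, root, path, new_key, split_offset);
}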
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
4257 struct extent_buffer
*leaf
;
4261 leaf
= path
->nodes
[0];
4262 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4263 ret
= setup_leaf_for_split(trans
, root
, path
,
4264 item_size
+ sizeof(struct btrfs_item
));
4269 setup_items_for_insert(root
, path
, new_key
, &item_size
,
4270 item_size
, item_size
+
4271 sizeof(struct btrfs_item
), 1);
4272 leaf
= path
->nodes
[0];
4273 memcpy_extent_buffer(leaf
,
4274 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4275 btrfs_item_ptr_offset(leaf
, path
->slots
[0] - 1),
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
			 u32 new_size, int from_end)
4290 struct extent_buffer
*leaf
;
4291 struct btrfs_item
*item
;
4293 unsigned int data_end
;
4294 unsigned int old_data_start
;
4295 unsigned int old_size
;
4296 unsigned int size_diff
;
4298 struct btrfs_map_token token
;
4300 btrfs_init_map_token(&token
);
4302 leaf
= path
->nodes
[0];
4303 slot
= path
->slots
[0];
4305 old_size
= btrfs_item_size_nr(leaf
, slot
);
4306 if (old_size
== new_size
)
4309 nritems
= btrfs_header_nritems(leaf
);
4310 data_end
= leaf_data_end(root
, leaf
);
4312 old_data_start
= btrfs_item_offset_nr(leaf
, slot
);
4314 size_diff
= old_size
- new_size
;
4317 BUG_ON(slot
>= nritems
);
4320 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4322 /* first correct the data pointers */
4323 for (i
= slot
; i
< nritems
; i
++) {
4325 item
= btrfs_item_nr(leaf
, i
);
4327 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4328 btrfs_set_token_item_offset(leaf
, item
,
4329 ioff
+ size_diff
, &token
);
4332 /* shift the data */
4334 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4335 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
4336 data_end
, old_data_start
+ new_size
- data_end
);
4338 struct btrfs_disk_key disk_key
;
4341 btrfs_item_key(leaf
, &disk_key
, slot
);
4343 if (btrfs_disk_key_type(&disk_key
) == BTRFS_EXTENT_DATA_KEY
) {
4345 struct btrfs_file_extent_item
*fi
;
4347 fi
= btrfs_item_ptr(leaf
, slot
,
4348 struct btrfs_file_extent_item
);
4349 fi
= (struct btrfs_file_extent_item
*)(
4350 (unsigned long)fi
- size_diff
);
4352 if (btrfs_file_extent_type(leaf
, fi
) ==
4353 BTRFS_FILE_EXTENT_INLINE
) {
4354 ptr
= btrfs_item_ptr_offset(leaf
, slot
);
4355 memmove_extent_buffer(leaf
, ptr
,
4357 offsetof(struct btrfs_file_extent_item
,
4362 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4363 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
4364 data_end
, old_data_start
- data_end
);
4366 offset
= btrfs_disk_key_offset(&disk_key
);
4367 btrfs_set_disk_key_offset(&disk_key
, offset
+ size_diff
);
4368 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4370 fixup_low_keys(root
, path
, &disk_key
, 1);
4373 item
= btrfs_item_nr(leaf
, slot
);
4374 btrfs_set_item_size(leaf
, item
, new_size
);
4375 btrfs_mark_buffer_dirty(leaf
);
4377 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
4378 btrfs_print_leaf(root
, leaf
);
/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size)
4391 struct extent_buffer
*leaf
;
4392 struct btrfs_item
*item
;
4394 unsigned int data_end
;
4395 unsigned int old_data
;
4396 unsigned int old_size
;
4398 struct btrfs_map_token token
;
4400 btrfs_init_map_token(&token
);
4402 leaf
= path
->nodes
[0];
4404 nritems
= btrfs_header_nritems(leaf
);
4405 data_end
= leaf_data_end(root
, leaf
);
4407 if (btrfs_leaf_free_space(root
, leaf
) < data_size
) {
4408 btrfs_print_leaf(root
, leaf
);
4411 slot
= path
->slots
[0];
4412 old_data
= btrfs_item_end_nr(leaf
, slot
);
4415 if (slot
>= nritems
) {
4416 btrfs_print_leaf(root
, leaf
);
4417 printk(KERN_CRIT
"slot %d too large, nritems %d\n",
4423 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4425 /* first correct the data pointers */
4426 for (i
= slot
; i
< nritems
; i
++) {
4428 item
= btrfs_item_nr(leaf
, i
);
4430 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4431 btrfs_set_token_item_offset(leaf
, item
,
4432 ioff
- data_size
, &token
);
4435 /* shift the data */
4436 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4437 data_end
- data_size
, btrfs_leaf_data(leaf
) +
4438 data_end
, old_data
- data_end
);
4440 data_end
= old_data
;
4441 old_size
= btrfs_item_size_nr(leaf
, slot
);
4442 item
= btrfs_item_nr(leaf
, slot
);
4443 btrfs_set_item_size(leaf
, item
, old_size
+ data_size
);
4444 btrfs_mark_buffer_dirty(leaf
);
4446 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
4447 btrfs_print_leaf(root
, leaf
);
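/*
 * Editor's note: illustrative sketch, not part of the original file.  It
 * pairs btrfs_truncate_item() and btrfs_extend_item() to resize the item
 * the path currently points at; the caller is assumed to have verified
 * (e.g. with btrfs_leaf_free_space()) that a grow still fits in the leaf.
 * The helper name is hypothetical.
 */
static inline void example_resize_item(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path,
				       u32 old_size, u32 new_size)
{
	if (new_size < old_size) {
		/* shrink: chop bytes off the end of the item */
		btrfs_truncate_item(root, path, new_size, 1);
	} else if (new_size > old_size) {
		/* grow: add the difference after the current item data */
		btrfs_extend_item(trans, root, path, new_size - old_size);
	}
}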
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
4461 struct btrfs_item
*item
;
4464 unsigned int data_end
;
4465 struct btrfs_disk_key disk_key
;
4466 struct extent_buffer
*leaf
;
4468 struct btrfs_map_token token
;
4470 btrfs_init_map_token(&token
);
4472 leaf
= path
->nodes
[0];
4473 slot
= path
->slots
[0];
4475 nritems
= btrfs_header_nritems(leaf
);
4476 data_end
= leaf_data_end(root
, leaf
);
4478 if (btrfs_leaf_free_space(root
, leaf
) < total_size
) {
4479 btrfs_print_leaf(root
, leaf
);
4480 printk(KERN_CRIT
"not enough freespace need %u have %d\n",
4481 total_size
, btrfs_leaf_free_space(root
, leaf
));
4485 if (slot
!= nritems
) {
4486 unsigned int old_data
= btrfs_item_end_nr(leaf
, slot
);
4488 if (old_data
< data_end
) {
4489 btrfs_print_leaf(root
, leaf
);
4490 printk(KERN_CRIT
"slot %d old_data %d data_end %d\n",
4491 slot
, old_data
, data_end
);
4495 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4497 /* first correct the data pointers */
4498 for (i
= slot
; i
< nritems
; i
++) {
4501 item
= btrfs_item_nr(leaf
, i
);
4502 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4503 btrfs_set_token_item_offset(leaf
, item
,
4504 ioff
- total_data
, &token
);
4506 /* shift the items */
4507 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ nr
),
4508 btrfs_item_nr_offset(slot
),
4509 (nritems
- slot
) * sizeof(struct btrfs_item
));
4511 /* shift the data */
4512 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4513 data_end
- total_data
, btrfs_leaf_data(leaf
) +
4514 data_end
, old_data
- data_end
);
4515 data_end
= old_data
;
4518 /* setup the item for the new data */
4519 for (i
= 0; i
< nr
; i
++) {
4520 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
+ i
);
4521 btrfs_set_item_key(leaf
, &disk_key
, slot
+ i
);
4522 item
= btrfs_item_nr(leaf
, slot
+ i
);
4523 btrfs_set_token_item_offset(leaf
, item
,
4524 data_end
- data_size
[i
], &token
);
4525 data_end
-= data_size
[i
];
4526 btrfs_set_token_item_size(leaf
, item
, data_size
[i
], &token
);
4529 btrfs_set_header_nritems(leaf
, nritems
+ nr
);
4532 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
);
4533 fixup_low_keys(root
, path
, &disk_key
, 1);
4535 btrfs_unlock_up_safe(path
, 1);
4536 btrfs_mark_buffer_dirty(leaf
);
4538 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
4539 btrfs_print_leaf(root
, leaf
);
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
4560 for (i
= 0; i
< nr
; i
++)
4561 total_data
+= data_size
[i
];
4563 total_size
= total_data
+ (nr
* sizeof(struct btrfs_item
));
4564 ret
= btrfs_search_slot(trans
, root
, cpu_key
, path
, total_size
, 1);
4570 slot
= path
->slots
[0];
4573 setup_items_for_insert(root
, path
, cpu_key
, data_size
,
4574 total_data
, total_size
, nr
);
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
4587 struct btrfs_path
*path
;
4588 struct extent_buffer
*leaf
;
4591 path
= btrfs_alloc_path();
4594 ret
= btrfs_insert_empty_item(trans
, root
, path
, cpu_key
, data_size
);
4596 leaf
= path
->nodes
[0];
4597 ptr
= btrfs_item_ptr_offset(leaf
, path
->slots
[0]);
4598 write_extent_buffer(leaf
, data
, ptr
, data_size
);
4599 btrfs_mark_buffer_dirty(leaf
);
4601 btrfs_free_path(path
);
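/*
 * Editor's note: illustrative sketch, not part of the original file.  The
 * payload structure, the key fields and the helper name are all made up
 * for the example; it only shows the calling convention of
 * btrfs_insert_item(), which allocates its own path and splits leaves as
 * needed.
 */
struct example_payload {
	__le64 value;
} __attribute__((packed));

static inline int example_insert_small_item(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    u64 objectid, u8 type, u64 offset,
					    u64 value)
{
	struct example_payload data;
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;
	data.value = cpu_to_le64(value);

	return btrfs_insert_item(trans, root, &key, &data, sizeof(data));
}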
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
4614 struct extent_buffer
*parent
= path
->nodes
[level
];
4618 nritems
= btrfs_header_nritems(parent
);
4619 if (slot
!= nritems
- 1) {
4621 tree_mod_log_eb_move(root
->fs_info
, parent
, slot
,
4622 slot
+ 1, nritems
- slot
- 1);
4623 memmove_extent_buffer(parent
,
4624 btrfs_node_key_ptr_offset(slot
),
4625 btrfs_node_key_ptr_offset(slot
+ 1),
4626 sizeof(struct btrfs_key_ptr
) *
4627 (nritems
- slot
- 1));
4629 ret
= tree_mod_log_insert_key(root
->fs_info
, parent
, slot
,
4630 MOD_LOG_KEY_REMOVE
);
4635 btrfs_set_header_nritems(parent
, nritems
);
4636 if (nritems
== 0 && parent
== root
->node
) {
4637 BUG_ON(btrfs_header_level(root
->node
) != 1);
4638 /* just turn the root into a leaf and break */
4639 btrfs_set_header_level(root
->node
, 0);
4640 } else if (slot
== 0) {
4641 struct btrfs_disk_key disk_key
;
4643 btrfs_node_key(parent
, &disk_key
, 0);
4644 fixup_low_keys(root
, path
, &disk_key
, level
+ 1);
4646 btrfs_mark_buffer_dirty(parent
);
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(root, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
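/*
 * Illustrative sketch (not from the original source): deleting a single
 * item by key with btrfs_search_slot() + btrfs_del_items().  The key is
 * whatever the caller wants removed; btrfs_del_item() in ctree.h is simply
 * this call with nr == 1.
 */
#if 0
static int example_delete_by_key(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* ins_len = -1 and cow = 1: we are about to remove from the leaf */
	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;	/* exact key not found */

	btrfs_free_path(path);
	return ret;
}
#endif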
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1, 0, NULL);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
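/*
 * Illustrative sketch (not from the original source): how a caller in the
 * spirit of the defrag or tree-log code might walk every block that is at
 * least as new as min_trans.  The starting key, the max_key bounds and the
 * naive key advance are placeholders for the example only.
 */
#if 0
static int example_walk_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;	/* btrfs_search_forward() requires this */

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;
	max_key.objectid = (u64)-1;
	max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, &max_key, path,
					   min_trans);
		if (ret)	/* 1: nothing newer left, < 0: error */
			break;

		/* min_key now holds the key that was found; process it here */

		btrfs_release_path(path);
		min_key.offset++;	/* naive advance, for illustration only */
	}
	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
#endif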
static void tree_move_down(struct btrfs_root *root,
			   struct btrfs_path *path,
			   int *level, int root_level)
{
	BUG_ON(*level == 0);
	path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
					path->slots[*level]);
	path->slots[*level - 1] = 0;
	(*level)--;
}
static int tree_move_next_or_upnext(struct btrfs_root *root,
				    struct btrfs_path *path,
				    int *level, int root_level)
{
	int ret = 0;
	int nritems;
	nritems = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	while (path->slots[*level] >= nritems) {
		if (*level == root_level)
			return -1;

		/* move upnext */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		nritems = btrfs_header_nritems(path->nodes[*level]);
		ret = 1;
	}
	return ret;
}
/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
 */
static int tree_advance(struct btrfs_root *root,
			struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key)
{
	int ret;

	if (*level == 0 || !allow_down) {
		ret = tree_move_next_or_upnext(root, path, level, root_level);
	} else {
		tree_move_down(root, path, level, root_level);
		ret = 0;
	}
	if (ret >= 0) {
		if (*level == 0)
			btrfs_item_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
		else
			btrfs_node_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
	}
	return ret;
}
static int tree_compare_item(struct btrfs_root *left_root,
			     struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	int cmp;
	int len1, len2;
	unsigned long off1, off2;

	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;

	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
					right_path->slots[0]);

	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
}
#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t changed_cb, void *ctx)
{
	int ret;
	int cmp;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached;
	int right_end_reached;
	int advance_left;
	int advance_right;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_start_ctransid;
	u64 right_start_ctransid;
	u64 ctransid;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto out;
	}

	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;

	spin_lock(&left_root->root_item_lock);
	left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
	spin_unlock(&left_root->root_item_lock);

	spin_lock(&right_root->root_item_lock);
	right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
	spin_unlock(&right_root->root_item_lock);

	trans = btrfs_join_transaction(left_root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leafs
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leafs
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we found a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   level deeper.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */

	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	left_path->nodes[left_level] = left_root->commit_root;
	extent_buffer_get(left_path->nodes[left_level]);

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] = right_root->commit_root;
	extent_buffer_get(right_path->nodes[right_level]);

	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);

	left_end_reached = right_end_reached = 0;
	advance_left = advance_right = 0;

	while (1) {
		/*
		 * We need to make sure the transaction does not get committed
		 * while we do anything on commit roots. This means, we need to
		 * join and leave transactions for every item that we process.
		 */
		if (trans && btrfs_should_end_transaction(trans, left_root)) {
			btrfs_release_path(left_path);
			btrfs_release_path(right_path);

			ret = btrfs_end_transaction(trans, left_root);
			trans = NULL;
			if (ret < 0)
				goto out;
		}
		/* now rejoin the transaction */
		if (!trans) {
			trans = btrfs_join_transaction(left_root);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				trans = NULL;
				goto out;
			}

			spin_lock(&left_root->root_item_lock);
			ctransid = btrfs_root_ctransid(&left_root->root_item);
			spin_unlock(&left_root->root_item_lock);
			if (ctransid != left_start_ctransid)
				left_start_ctransid = 0;

			spin_lock(&right_root->root_item_lock);
			ctransid = btrfs_root_ctransid(&right_root->root_item);
			spin_unlock(&right_root->root_item_lock);
			if (ctransid != right_start_ctransid)
				right_start_ctransid = 0;

			if (!left_start_ctransid || !right_start_ctransid) {
				WARN(1, KERN_WARNING
					"btrfs: btrfs_compare_tree detected "
					"a change in one of the trees while "
					"iterating. This is probably a "
					"bug.\n");
				ret = -EIO;
				goto out;
			}

			/*
			 * the commit root may have changed, so start again
			 * where we stopped
			 */
			left_path->lowest_level = left_level;
			right_path->lowest_level = right_level;
			ret = btrfs_search_slot(NULL, left_root,
					&left_key, left_path, 0, 0);
			if (ret < 0)
				goto out;
			ret = btrfs_search_slot(NULL, right_root,
					&right_key, right_path, 0, 0);
			if (ret < 0)
				goto out;
		}

		if (advance_left && !left_end_reached) {
			ret = tree_advance(left_root, left_path, &left_level,
					left_root_level,
					advance_left != ADVANCE_ONLY_NEXT,
					&left_key);
			if (ret < 0)
				left_end_reached = ADVANCE;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(right_root, right_path, &right_level,
					right_root_level,
					advance_right != ADVANCE_ONLY_NEXT,
					&right_key);
			if (ret < 0)
				right_end_reached = ADVANCE;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out;
		} else if (left_end_reached) {
			if (right_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			if (left_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
				advance_right = ADVANCE;
			} else {
				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
				ret = tree_compare_item(left_root, left_path,
						right_path, tmp_buf);
				if (ret) {
					WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
					ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_CHANGED,
						ctx);
					if (ret < 0)
						goto out;
				}
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr) {
					/*
					 * As we're on a shared block, don't
					 * allow to go deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kfree(tmp_buf);

	if (trans) {
		if (!ret)
			ret = btrfs_end_transaction(trans, left_root);
		else
			btrfs_end_transaction(trans, left_root);
	}

	return ret;
}
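/*
 * Illustrative sketch (not from the original source): a minimal
 * btrfs_changed_cb_t callback and a call to btrfs_compare_trees(), in the
 * spirit of how the send code compares a parent and a send snapshot.  The
 * callback here only counts changes; struct example_ctx is hypothetical.
 */
#if 0
struct example_ctx {
	u64 new_items;
	u64 deleted_items;
	u64 changed_items;
};

static int example_changed_cb(struct btrfs_root *left_root,
			      struct btrfs_root *right_root,
			      struct btrfs_path *left_path,
			      struct btrfs_path *right_path,
			      struct btrfs_key *key,
			      enum btrfs_compare_tree_result result,
			      void *ctx)
{
	struct example_ctx *stats = ctx;

	if (result == BTRFS_COMPARE_TREE_NEW)
		stats->new_items++;
	else if (result == BTRFS_COMPARE_TREE_DELETED)
		stats->deleted_items++;
	else
		stats->changed_items++;
	return 0;	/* a negative return aborts the compare */
}

static int example_compare(struct btrfs_root *send_root,
			   struct btrfs_root *parent_root)
{
	struct example_ctx stats = { 0, 0, 0 };

	return btrfs_compare_trees(send_root, parent_root,
				   example_changed_cb, &stats);
}
#endif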
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (slot == 0)
				slot++;
			goto next;
		}

		if (level == 0) {
			btrfs_item_key_to_cpu(c, key, slot);
		} else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
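/*
 * Illustrative sketch (not from the original source): the common pattern
 * for iterating all items of one object with btrfs_search_slot() followed
 * by btrfs_next_leaf().  The starting key and the per-item work are
 * placeholders.
 */
#if 0
static int example_iterate_items(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = 0;		/* smallest possible key for this object */
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret)	/* 1: no more leaves, < 0: error */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != objectid)
			break;		/* walked past the object we care about */

		/* process the item at path->slots[0] here ... */

		path->slots[0]++;
	}
	ret = ret < 0 ? ret : 0;
out:
	btrfs_free_path(path);
	return ret;
}
#endif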
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)