/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				 struct extent_buffer *eb);
struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
					  u32 blocksize, u64 parent_transid,
					  u64 time_seq);
struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
						u64 bytenr, u32 blocksize,
						u64 time_seq);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
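
/*
 * Added usage sketch (not part of the original file): the normal lifecycle
 * of a path is allocate, search, then release/free.  btrfs_free_path() is
 * safe even when the path still holds locks and extent buffer references:
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	... examine path->nodes[0] / path->slots[0] ...
 *	btrfs_free_path(path);
 */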
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
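
/*
 * Added note (not in the original source): atomic_inc_not_zero() is the
 * usual RCU pattern for taking a reference on an object that may be freed
 * concurrently.  If the refcount already hit zero the buffer is being
 * destroyed and must not be resurrected, so the loop above waits in
 * synchronize_rcu() and re-reads root->node, which by then points at the
 * new root published by the concurrent COW.
 */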
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0, 1);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * Increment the upper half of tree_mod_seq, set lower half zero.
 *
 * Must be called with fs_info->tree_mod_seq_lock held.
 */
static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
{
	u64 seq = atomic64_read(&fs_info->tree_mod_seq);
	seq &= 0xffffffff00000000ull;
	seq += 1ull << 32;
	atomic64_set(&fs_info->tree_mod_seq, seq);
	return seq;
}
/*
 * Increment the lower half of tree_mod_seq.
 *
 * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
 * are generated should not technically require a spin lock here. (Rationale:
 * a minor increment that races with a major increment, landing between the
 * major's atomic64_read and atomic64_set calls, doesn't duplicate sequence
 * numbers, it just returns a unique sequence number as usual.) We have
 * decided to leave that requirement in here and rethink it once we notice it
 * really imposes a problem on some workload.
 */
static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
/*
 * return the last minor in the previous major tree_mod_seq number
 */
u64 btrfs_tree_mod_seq_prev(u64 seq)
{
	return (seq & 0xffffffff00000000ull) - 1ull;
}
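
/*
 * Added illustration (not in the original source): tree_mod_seq packs a
 * major number into the upper 32 bits and a minor number into the lower 32
 * bits.  If the counter currently reads 0x0000000300000007 (major 3, minor
 * 7), a major increment yields 0x0000000400000000 and the following minor
 * increment 0x0000000400000001.  btrfs_tree_mod_seq_prev() of that value
 * gives 0x00000003ffffffff, i.e. the last minor of major 3.
 */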
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	u64 seq;

	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	seq = btrfs_inc_tree_mod_seq_minor(fs_info);
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return seq;
}
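
/*
 * Illustrative usage (a sketch, not from the original file): a reader that
 * wants a stable historical view, e.g. for backref walking, registers a
 * blocker and drops it again when done:
 *
 *	struct seq_list elem = {};
 *	u64 seq;
 *
 *	seq = btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... walk the tree as it looked at time 'seq' ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */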
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	if (!seq_putting)
		return;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);
	elem->seq = 0;

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
		kfree(tm);
	}
	tree_mod_log_write_unlock(fs_info);
}
/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	BUG_ON(!tm || !tm->seq);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->index < tm->index)
			new = &((*new)->rb_left);
		else if (cur->index > tm->index)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else {
			kfree(tm);
			return -EEXIST;
		}
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	smp_mb();
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&fs_info->tree_mod_seq_list)) {
		/*
		 * someone emptied the list while we were waiting for the lock.
		 * we must not add to the list when no blocker exists.
		 */
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}
/*
 * This allocates memory and gets a tree modification sequence number.
 *
 * Returns <0 on error.
 * Returns >0 (the added sequence number) on success.
 */
static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
				 struct tree_mod_elem **tm_ret)
{
	struct tree_mod_elem *tm;

	/*
	 * once we switch from spin locks to something different, we should
	 * honor the flags parameter here.
	 */
	tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
	if (!tm)
		return -ENOMEM;

	spin_lock(&fs_info->tree_mod_seq_lock);
	tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
	spin_unlock(&fs_info->tree_mod_seq_lock);

	return tm->seq;
}
static inline int
__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot,
			  enum mod_log_op op, gfp_t flags)
{
	int ret;
	struct tree_mod_elem *tm;

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		return ret;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);

	return __tree_mod_log_insert(fs_info, tm);
}
static noinline int
tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
			     struct extent_buffer *eb, int slot,
			     enum mod_log_op op, gfp_t flags)
{
	int ret;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);

	tree_mod_log_write_unlock(fs_info);
	return ret;
}

static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
			int slot, enum mod_log_op op)
{
	return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
}
static noinline int
tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
			       struct extent_buffer *eb, int slot,
			       enum mod_log_op op)
{
	return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
}
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, eb))
		return 0;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
				MOD_LOG_KEY_REMOVE_WHILE_MOVING);
		BUG_ON(ret < 0);
	}

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = eb->start >> PAGE_CACHE_SHIFT;
	tm->slot = src_slot;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	tree_mod_log_write_unlock(fs_info);
	return ret;
}
static noinline void
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	int i;
	u32 nritems;
	int ret;

	if (btrfs_header_level(eb) == 0)
		return;

	nritems = btrfs_header_nritems(eb);
	for (i = nritems - 1; i >= 0; i--) {
		ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
				MOD_LOG_KEY_REMOVE_WHILE_FREEING);
		BUG_ON(ret < 0);
	}
}
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm;
	int ret;

	if (tree_mod_dont_log(fs_info, NULL))
		return 0;

	if (log_removal)
		__tree_mod_log_free_eb(fs_info, old_root);

	ret = tree_mod_alloc(fs_info, flags, &tm);
	if (ret < 0)
		goto out;

	tm->index = new_root->start >> PAGE_CACHE_SHIFT;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	ret = __tree_mod_log_insert(fs_info, tm);
out:
	tree_mod_log_write_unlock(fs_info);
	return ret;
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;
	u64 index = start >> PAGE_CACHE_SHIFT;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->index < index) {
			node = node->rb_left;
		} else if (cur->index > index) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}
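
/*
 * Added example (not in the original source): @smallest picks which end of
 * a block's seq run wins.  Given logged elements (index, seq) = (I, 5),
 * (I, 8) and (I, 11) and min_seq == 6, smallest == 1 returns (I, 8), the
 * oldest element not below min_seq, while smallest == 0 returns (I, 11),
 * the most recent one.  (I, 5) is ignored in both cases.
 */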
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
static noinline void
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret;
	int i;

	if (tree_mod_dont_log(fs_info, NULL))
		return;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
		tree_mod_log_write_unlock(fs_info);
		return;
	}

	for (i = 0; i < nr_items; i++) {
		ret = tree_mod_log_insert_key_locked(fs_info, src,
						i + src_offset,
						MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
		ret = tree_mod_log_insert_key_locked(fs_info, dst,
						     i + dst_offset,
						     MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}

	tree_mod_log_write_unlock(fs_info);
}
static noinline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;
	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
					   MOD_LOG_KEY_REPLACE,
					   atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}
static noinline void
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	if (tree_mod_dont_log(fs_info, eb))
		return;

	__tree_mod_log_free_eb(fs_info, eb);

	tree_mod_log_write_unlock(fs_info);
}
static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;
	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0, 1);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start,
							  buf->len,
							  new_flags, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0, 1);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(root->ref_cows && trans->transid !=
		root->fs_info->running_transaction->transid);
	WARN_ON(root->ref_cows && trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
				     root->root_key.objectid, &disk_key,
				     level, search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(cow),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (root->ref_cows)
		btrfs_reloc_cow_block(trans, root, buf, cow);

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		tree_mod_log_free_eb(root->fs_info, buf);
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the replacement
	 * operation (if it is replaced at all). this has the index of the *new*
	 * root, making it the very first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewinded (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
		      struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->index != first_tm->index)
			break;
	}
	btrfs_set_header_nritems(eb, n);
}
/*
 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		    u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (!time_seq)
		return eb;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(eb->start,
						fs_info->tree_root->nodesize);
		BUG_ON(!eb_rewin);
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		BUG_ON(!eb_rewin);
	}

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_unlock(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;
	u32 blocksize;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		blocksize = btrfs_level_size(root, old_root->level);
		old = read_tree_block(root, logical, blocksize, 0);
		if (!old || !extent_buffer_uptodate(old)) {
			free_extent_buffer(old);
			pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
				logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(logical, root->nodesize);
	} else {
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !root->force_cow)
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     (unsigned long long)trans->transid,
		     (unsigned long long)
		     root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		     (unsigned long long)trans->transid,
		     (unsigned long long)root->fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
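
/*
 * Added example (not in the original source): with a 4K blocksize,
 * close_blocks(100 * 4096, 105 * 4096, 4096) sees a forward gap of
 * 4 * 4096 = 16384 bytes.  That is below the 32K threshold, so the blocks
 * count as close and defrag will not relocate them.
 */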
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}
/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
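
/*
 * Added example (not in the original source): keys order as the tuple
 * (objectid, type, offset).  So (256, BTRFS_INODE_ITEM_KEY, 0) sorts before
 * (256, BTRFS_INODE_REF_KEY, 256), because the inode item's type value is
 * numerically smaller, and both sort before any key with objectid 257.
 */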
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr;
	u64 gen;
	u64 search_start = *last_ret;
	u64 last_block = 0;
	u64 other;
	u32 parent_nritems;
	int end_slot;
	int i;
	int err = 0;
	int parent_level;
	int uptodate;
	u32 blocksize;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = btrfs_level_size(root, parent_level - 1);
	end_slot = parent_nritems;

	if (parent_nritems == 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i < end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot - 2) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root, blocknr, blocksize);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr,
							 blocksize, gen);
				if (!cur || !extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);

			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}

		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;

	if (slot < 0)
		return NULL;
	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
			     btrfs_level_size(root, level - 1),
			     btrfs_node_ptr_generation(parent, slot));
	if (eb && !extent_buffer_uptodate(eb)) {
		free_extent_buffer(eb);
		eb = NULL;
	}

	return eb;
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child, 1);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_std_error(root->fs_info, ret);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root, mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else {
				wret = push_node_left(trans, root,
						      left, mid, 0);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else {
				wret = balance_node_right(trans, root,
							  right, mid);
			}
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	int direction = path->reada;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (level != 1)
		return;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = btrfs_level_size(root, level - 1);
	eb = btrfs_find_tree_block(root, search, blocksize);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (direction < 0) {
			if (nr == 0)
				break;
			nr--;
		} else if (direction > 0) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada < 0 && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search, blocksize, gen);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
/*
 * returns -EAGAIN if it had to drop the path, or zero if everything was in
 * cache
 */
static noinline int reada_for_balance(struct btrfs_root *root,
				      struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;
	int ret = 0;
	int blocksize;

	parent = path->nodes[level + 1];
	if (!parent)
		return 0;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];
	blocksize = btrfs_level_size(root, level);

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root, block1, blocksize);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root, block2, blocksize);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}
	if (block1 || block2) {
		ret = -EAGAIN;

		/* release the whole path */
		btrfs_release_path(path);

		/* read the blocks */
		if (block1)
			readahead_tree_block(root, block1, blocksize, 0);
		if (block2)
			readahead_tree_block(root, block2, blocksize, 0);

		if (block1) {
			eb = read_tree_block(root, block1, blocksize, 0);
			free_extent_buffer(eb);
		}
		if (block2) {
			eb = read_tree_block(root, block2, blocksize, 0);
			free_extent_buffer(eb);
		}
	}
	return ret;
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot.  The goal is to find a block
 * in cache without setting the path to blocking.  If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada.  -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      struct btrfs_key *key, u64 time_seq)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = btrfs_level_size(root, level - 1);

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
			if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				/*
				 * we found an up to date block without
				 * sleeping, return right away
				 */
				*eb_ret = tmp;
				return 0;
			}
			/* the pages were up to date, but we failed
			 * the generation number check.  Do a full
			 * read for the generation number that is correct.
			 * We must do this without dropping locks so
			 * we can trust our generation number
			 */
			free_extent_buffer(tmp);
			btrfs_set_path_blocking(p);

			/* now we're allowed to do a blocking uptodate check */
			tmp = read_tree_block(root, blocknr, blocksize, gen);
			if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
				*eb_ret = tmp;
				return 0;
			}
			free_extent_buffer(tmp);
			btrfs_release_path(p);
			return -EIO;
		}
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read.  Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date.  Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 * on our EAGAINs.
		 */
		if (!btrfs_buffer_uptodate(tmp, 0, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}
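/*
 * Illustrative sketch (not part of the original file): the -EAGAIN contract
 * above means a caller must be prepared to restart the whole descent,
 * because the path has been released.  Both search functions below follow
 * this shape:
 *
 *	err = read_block_for_search(trans, root, p, &b, level, slot, key, 0);
 *	if (err == -EAGAIN)
 *		goto again;	// locks dropped, restart from the root
 *	if (err) {
 *		ret = err;	// hard error such as -EIO
 *		goto done;
 *	}
 */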
/*
 * helper function for btrfs_search_slot.  This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned.  If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * start over
 */
static int
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
{
	int ret;

	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		BUG_ON(sret > 0);
		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		int sret;

		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);
			goto again;
		}

		sret = reada_for_balance(root, p, level);
		if (sret)
			goto again;

		btrfs_set_path_blocking(p);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		if (sret) {
			ret = sret;
			goto done;
		}
		b = p->nodes[level];
		if (!b) {
			btrfs_release_path(p);
			goto again;
		}
		BUG_ON(btrfs_header_nritems(b) == 1);
	}
	return 0;

again:
	ret = -EAGAIN;
done:
	return ret;
}
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	int root_lock;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);

	if (ins_len < 0) {
		lowest_unlock = 2;

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;
	}

	if (!cow)
		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

again:
	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;
	level = 0;
	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);
	} else {
		if (p->skip_locking) {
			b = btrfs_root_node(root);
			level = btrfs_header_level(b);
		} else {
			/* we don't know the level of the root node
			 * until we actually have it read locked
			 */
			b = btrfs_read_lock_root_node(root);
			level = btrfs_header_level(b);
			if (level <= write_lock_level) {
				/* whoops, must trade for write lock */
				btrfs_tree_read_unlock(b);
				free_extent_buffer(b);
				b = btrfs_lock_root_node(root);
				root_lock = BTRFS_WRITE_LOCK;

				/* the level might have changed, check again */
				level = btrfs_header_level(b);
			}
		}
	}
	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

	while (b) {
		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
		if (cow) {
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))
				goto cow_done;

			btrfs_set_path_blocking(p);

			/*
			 * must have write locks on this node and the
			 * parent
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			    level + 1 < BTRFS_MAX_LEVEL &&
			    p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);
			if (err) {
				ret = err;
				goto done;
			}
		}
cow_done:
		BUG_ON(!cow && ins_len);

		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If cow is true, then we might be changing slot zero,
		 * which may require changing the parent.  So, we can't
		 * drop the lock until after we know which slot we're
		 * operating on.
		 */
		if (!cow)
			btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len, &write_lock_level);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}
			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 * on the parent
			 */
			if (slot == 0 && cow &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);
				goto again;
			}

			unlock_up(p, level, lowest_unlock,
				  min_write_lock_level, &write_lock_level);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key, 0);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					err = btrfs_try_tree_write_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_WRITE_LOCK);
					}
					p->locks[level] = BTRFS_WRITE_LOCK;
				} else {
					err = btrfs_try_tree_read_lock(b);
					if (!err) {
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
								  BTRFS_READ_LOCK);
					}
					p->locks[level] = BTRFS_READ_LOCK;
				}
				p->nodes[level] = b;
			}
		} else {
			p->slots[level] = slot;
			if (ins_len > 0 &&
			    btrfs_leaf_free_space(root, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);
					goto again;
				}

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

				BUG_ON(err > 0);
				if (err) {
					ret = err;
					goto done;
				}
			}
			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, &write_lock_level);
			goto done;
		}
	}
	ret = 1;
done:
	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);
	return ret;
}
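/*
 * Illustrative usage sketch (not part of the original file; the objectid
 * variable is hypothetical).  A typical read-only lookup through
 * btrfs_search_slot() looks like this:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	struct extent_buffer *leaf;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *
 *	// ins_len == 0 and cow == 0: pure lookup, read locks only
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		leaf = path->nodes[0];
 *		// the item lives at path->slots[0] in this leaf
 *	}
 *	btrfs_free_path(path);
 */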
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree,
 * as denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
{
	struct extent_buffer *b;
	int slot;
	int ret;
	int err;
	int level;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(p->nodes[0] != NULL);

	if (p->search_commit_root) {
		BUG_ON(time_seq);
		return btrfs_search_slot(NULL, root, key, p, 0, 0);
	}

again:
	b = get_old_root(root, time_seq);
	level = btrfs_header_level(b);
	p->locks[level] = BTRFS_READ_LOCK;

	while (b) {
		level = btrfs_header_level(b);
		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
		btrfs_unlock_up_safe(p, level + 1);

		ret = bin_search(b, key, level, &slot);

		if (level != 0) {
			int dec = 0;
			if (ret && slot > 0) {
				dec = 1;
				slot -= 1;
			}
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);

			if (level == lowest_level) {
				if (dec)
					p->slots[level]++;
				goto done;
			}

			err = read_block_for_search(NULL, root, p, &b, level,
						    slot, key, time_seq);
			if (err == -EAGAIN)
				goto again;
			if (err) {
				ret = err;
				goto done;
			}

			level = btrfs_header_level(b);
			err = btrfs_try_tree_read_lock(b);
			if (!err) {
				btrfs_set_path_blocking(p);
				btrfs_tree_read_lock(b);
				btrfs_clear_path_blocking(p, b,
							  BTRFS_READ_LOCK);
			}
			b = tree_mod_log_rewind(root->fs_info, b, time_seq);
			p->locks[level] = BTRFS_READ_LOCK;
			p->nodes[level] = b;
		} else {
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);
			goto done;
		}
	}
	ret = 1;
done:
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0)
		btrfs_release_path(p);

	return ret;
}
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is
 * found, return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       struct btrfs_key *key, struct btrfs_path *p,
			       int find_higher, int return_any)
{
	int ret;
	struct extent_buffer *leaf;

again:
	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
	if (ret <= 0)
		return ret;
	/*
	 * a return value of 1 means the path is at the position where the
	 * item should be inserted. Normally this is the next bigger item,
	 * but in case the previous item is the last in a leaf, path points
	 * to the first free slot in the previous leaf, i.e. at an invalid
	 * item.
	 */
	leaf = p->nodes[0];

	if (find_higher) {
		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, p);
			if (ret <= 0)
				return ret;
			if (!return_any)
				return 1;
			/*
			 * no higher item found, return the next
			 * lower instead
			 */
			return_any = 0;
			find_higher = 0;
			btrfs_release_path(p);
			goto again;
		}
	} else {
		if (p->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, p);
			if (ret < 0)
				return ret;
			if (!ret) {
				p->slots[0] = btrfs_header_nritems(leaf) - 1;
				return 0;
			}
			if (!return_any)
				return 1;
			/*
			 * no lower item found, return the next
			 * higher instead
			 */
			return_any = 0;
			find_higher = 1;
			btrfs_release_path(p);
			goto again;
		} else {
			--p->slots[0];
		}
	}
	return 0;
}
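/*
 * Illustrative sketch (not part of the original file; found_key is a
 * hypothetical local).  Asking for the first item at or after 'key',
 * falling back to the next lower one when nothing higher exists:
 *
 *	// find_higher == 1, return_any == 1
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 */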
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 */
static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
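/*
 * Worked example (not part of the original file): if a leaf's smallest key
 * changes, the parent's key for that child must be rewritten no matter
 * which slot the child occupies, but the change only propagates further
 * up while the path runs through slot 0:
 *
 *	// level 1, slot 0: parent key rewritten, keep climbing
 *	// level 2, slot 3: parent key rewritten, tslot != 0 so stop
 */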
/*
 * update item key.
 *
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(root, path, &disk_key, 1);
}
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
			     push_items);
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		/*
		 * don't call tree_mod_log_eb_move here, key removal was
		 * already fully logged by tree_mod_log_eb_copy above.
		 */
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
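/*
 * Worked example (not part of the original file; the pointer counts are
 * hypothetical).  Say BTRFS_NODEPTRS_PER_BLOCK(root) == 121,
 * dst_nritems == 100 and src_nritems == 30: push_items starts at 21.
 * In the non-empty case it is clamped to min(30 - 8, 21) == 21, so src
 * keeps 9 pointers and dst ends up exactly full at 121, never overfull.
 */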
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
			     src_nritems - push_items, push_items);
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path, int level,
				    int log_removal)
{
	u64 lower_gen;
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
	if (level == 1)
		btrfs_item_key(lower, &lower_key, 0);
	else
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
				   root->root_key.objectid, &lower_key,
				   level, root->node->start, 0);
	if (IS_ERR(c))
		return PTR_ERR(c);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(c),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(c),
			    BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	old = root->node;
	tree_mod_log_set_root_pointer(root, c, log_removal);
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK;
	path->slots[level] = 0;
	return 0;
}
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
		if (level)
			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
					     slot, nritems - slot);
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
					      MOD_LOG_KEY_ADD);
		BUG_ON(ret < 0);
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
{
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;
	int mid;
	int ret;
	u32 c_nritems;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/*
		 * trying to split the root, lets make a new one
		 *
		 * tree mod log: We pass 0 as log_removal parameter to
		 * insert_new_root, because that root buffer will be kept as a
		 * normal node. We are going to log removal of half of the
		 * elements below with tree_mod_log_eb_copy. We're holding a
		 * tree lock on the buffer, which is why we cannot race with
		 * other tree_mod_log users.
		 */
		ret = insert_new_root(trans, root, path, level + 1, 0);
		if (ret)
			return ret;
	} else {
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
			return 0;
		if (ret < 0)
			return ret;
	}

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
					root->root_key.objectid,
					&disk_key, level, c->start, 0);
	if (IS_ERR(split))
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(split),
			    BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(split),
			    BTRFS_UUID_SIZE);

	tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);
	ret = 0;

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	insert_ptr(trans, root, path, &disk_key, split->start,
		   path->slots[level + 1] + 1, level + 1);

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
	} else {
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
	}
	return ret;
}
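/*
 * Worked example (not part of the original file): for c_nritems == 7,
 * mid = (7 + 1) / 2 == 4, so pointers 4..6 move to the new 'split' buffer
 * (3 items) and 'c' keeps items 0..3.  A path slot of 5 becomes slot 1 in
 * 'split', and path->slots[level + 1] is bumped past the new key that
 * insert_ptr() added in the parent.
 */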
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	struct btrfs_map_token token;
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	btrfs_init_map_token(&token);
	start_item = btrfs_item_nr(l, start);
	end_item = btrfs_item_nr(l, end);
	data_len = btrfs_token_item_offset(l, start_item, &token) +
		btrfs_token_item_size(l, start_item, &token);
	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;

	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
		       "used %d nritems %d\n",
		       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
		       leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
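/*
 * Worked example (not part of the original file; the sizes are for a
 * hypothetical 4K leaf).  On a leaf with BTRFS_LEAF_DATA_SIZE(root) == 3995
 * holding two items of 100 and 50 data bytes, leaf_space_used() counts
 * 150 bytes of data plus 2 * sizeof(struct btrfs_item) == 50 bytes of item
 * headers, leaving 3995 - 200 == 3795 bytes free for new items and data.
 */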
/*
 * min slot controls the lowest index we're willing to push to the
 * right.  We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_map_token token;
	struct btrfs_disk_key disk_key;
	int slot;
	u32 i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 nr;
	u32 right_nritems;
	u32 data_end;
	u32 this_item_size;

	btrfs_init_map_token(&token);

	if (empty)
		nr = 0;
	else
		nr = max_t(u32, 1, min_slot);

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
	while (i >= nr) {
		item = btrfs_item_nr(left, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
		if (i == 0)
			break;
		i--;
	}

	if (push_items == 0)
		goto out_unlock;

	WARN_ON(!empty && push_items == left_nritems);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
		     BTRFS_LEAF_DATA_SIZE(root) - push_space,
		     btrfs_leaf_data(left) + leaf_data_end(root, left),
		     push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
		   btrfs_item_nr_offset(left_nritems - push_items),
		   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);
		push_space -= btrfs_token_item_size(right, item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);

	if (left_nritems)
		btrfs_mark_buffer_dirty(left);
	else
		clean_tree_block(trans, root, left);

	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 0;

out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, min_data_size, empty,
				 right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	u32 this_item_size;
	u32 old_left_item_size;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(right, i);

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
				break;
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)
					break;
			}
		}

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)
			break;

		push_items++;
		push_space += this_item_size + sizeof(*item);
	}

	if (push_items == 0) {
		ret = 1;
		goto out;
	}
	if (!empty && push_items == btrfs_header_nritems(right))
		WARN_ON(1);

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
		     leaf_data_end(root, left) - push_space,
		     btrfs_leaf_data(right) +
		     btrfs_item_offset_nr(right, push_items - 1),
		     push_space);
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		u32 ioff;

		item = btrfs_item_nr(left, i);

		ioff = btrfs_token_item_offset(left, item, &token);
		btrfs_set_token_item_offset(left, item,
		      ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
		      &token);
	}
	btrfs_set_header_nritems(left, old_left_nritems + push_items);

	/* fixup right node */
	if (push_items > right_nritems)
		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
		     right_nritems);

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
						  leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

		memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			      (btrfs_header_nritems(right) - push_items) *
			      sizeof(struct btrfs_item));
	}
	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(right, i);

		push_space = push_space - btrfs_token_item_size(right,
								item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);
	}

	btrfs_mark_buffer_dirty(left);
	if (right_nritems)
		btrfs_mark_buffer_dirty(right);
	else
		clean_tree_block(trans, root, right);

	btrfs_item_key(right, &disk_key, 0);
	fixup_low_keys(root, path, &disk_key, 1);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
	} else {
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;
	}
	BUG_ON(path->slots[0] < 0);
	return ret;
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	if (left == NULL)
		return 1;

	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		if (ret == -ENOSPC)
			ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(trans, root, path, min_data_size,
				empty, left, free_space, right_nritems,
				max_slot);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	struct btrfs_disk_key disk_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		ioff = btrfs_token_item_offset(right, item, &token);
		btrfs_set_token_item_offset(right, item,
					    ioff + rt_data_off, &token);
	}

	btrfs_set_header_nritems(l, mid);
	btrfs_item_key(right, &disk_key, 0);
	insert_ptr(trans, root, path, &disk_key, right->start,
		   path->slots[1] + 1, 1);

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);
}
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *          A                 B                 C
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
	int ret;
	int progress = 0;
	int slot;
	u32 nritems;

	slot = path->slots[0];

	/*
	 * try to push all the items after our slot into the
	 * right leaf
	 */
	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * our goal is to get our slot at the start or end of a leaf.  If
	 * we've done so we're done
	 */
	if (path->slots[0] == 0 || path->slots[0] == nritems)
		return 0;

	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;

	/* try to push all the items before our slot into the next leaf */
	slot = path->slots[0];
	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	if (progress)
		return 0;
	return 1;
}
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	l = path->nodes[0];
	slot = path->slots[0];
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
		return -EOVERFLOW;

	/* first try to make some room by pushing left and right */
	if (data_size && path->nodes[1]) {
		wret = push_leaf_right(trans, root, path, data_size,
				       data_size, 0, 0);
		if (wret < 0)
			return wret;
		if (wret) {
			wret = push_leaf_left(trans, root, path, data_size,
					      data_size, 0, (u32)-1);
			if (wret < 0)
				return wret;
		}
		l = path->nodes[0];

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)
			return 0;
	}

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1, 1);
		if (ret)
			return ret;
	}
again:
	split = 1;
	l = path->nodes[0];
	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	if (mid <= slot) {
		if (nritems == 1 ||
		    leaf_space_used(l, mid, nritems - mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (slot >= nritems) {
				split = 0;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	} else {
		if (leaf_space_used(l, 0, mid) + data_size >
			BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
				split = 0;
			} else if ((extend || !data_size) && slot == 0) {
				mid = 1;
			} else {
				mid = slot;
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;
					split = 2;
				}
			}
		}
	}

	if (split == 0)
		btrfs_cpu_key_to_disk(&disk_key, ins_key);
	else
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
					root->root_key.objectid,
					&disk_key, 0, l->start, 0);
	if (IS_ERR(right))
		return PTR_ERR(right);

	root_add_used(root, root->leafsize);

	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, root->fs_info->fsid,
			    (unsigned long)btrfs_header_fsid(right),
			    BTRFS_FSID_SIZE);

	write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
			    (unsigned long)btrfs_header_chunk_tree_uuid(right),
			    BTRFS_UUID_SIZE);

	if (split == 0) {
		if (mid <= slot) {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1] + 1, 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			path->slots[1] += 1;
		} else {
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1], 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[0] = 0;
			if (path->slots[1] == 0)
				fixup_low_keys(root, path, &disk_key, 1);
		}
		btrfs_mark_buffer_dirty(right);
		return ret;
	}

	copy_for_split(trans, root, path, l, right, slot, mid, nritems);

	if (split == 2) {
		BUG_ON(num_doubles != 0);
		num_doubles++;
		goto again;
	}

	return 0;

push_for_double:
	push_for_double_split(trans, root, path, data_size);
	tried_avoid_double = 1;
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;
	goto again;
}
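/*
 * Illustrative summary (not part of the original file) of the three
 * 'split' modes chosen above:
 *
 *	split == 1: classic split, items [mid..nritems) move to 'right'
 *	split == 0: the new item will sit alone, so 'right' starts empty
 *	            and the path is simply pointed at it
 *	split == 2: double split; copy_for_split() runs and then we loop
 *	            through 'again' once more (num_doubles guards the loop)
 */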
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	u64 extent_len = 0;
	u32 item_size;
	int ret;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
	       key.type != BTRFS_EXTENT_CSUM_KEY);

	if (btrfs_leaf_free_space(root, leaf) >= ins_len)
		return 0;

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
	}
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->search_for_split = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	path->search_for_split = 0;
	if (ret < 0)
		goto err;

	ret = -EAGAIN;
	leaf = path->nodes[0];
	/* if our item isn't there or got smaller, return now */
	if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
		goto err;

	/* the leaf has changed, it now has room.  return now */
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
		goto err;

	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
			goto err;
	}

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &key, path, ins_len, 1);
	if (ret)
		goto err;

	path->keep_locks = 0;
	btrfs_unlock_up_safe(path, 1);
	return 0;
err:
	path->keep_locks = 0;
	return ret;
}
static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *new_key,
			       unsigned long split_offset)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	int slot;
	char *buf;
	u32 nritems;
	u32 item_size;
	u32 orig_offset;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(leaf, path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
				btrfs_item_nr_offset(slot),
				(nritems - slot) * sizeof(struct btrfs_item));
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(leaf, slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),
			    split_offset);

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
	kfree(buf);
	return 0;
}

/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;

	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(trans, root, path, new_key, split_offset);
	return ret;
}
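/*
 * Illustrative usage sketch (not part of the original file; the new_key
 * value is hypothetical).  Splitting an existing item 16 bytes in, so the
 * first 16 bytes keep the old key and the remainder moves under new_key:
 *
 *	ret = btrfs_split_item(trans, root, path, &new_key, 16);
 *	if (ret == 0) {
 *		// path->slots[0] still points at the old (now 16 byte) item;
 *		// the new item sits at path->slots[0] + 1 in the same leaf
 *	}
 */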
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	setup_items_for_insert(root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
			 u32 new_size, int from_end)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)
		return;

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot < 0);
	BUG_ON(slot >= nritems);

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff + size_diff, &token);
	}

	/* shift the data */
	if (from_end) {
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start + new_size - data_end);
	} else {
		struct btrfs_disk_key disk_key;
		u64 offset;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			unsigned long ptr;
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
				      (unsigned long)fi,
				      offsetof(struct btrfs_file_extent_item,
					       disk_bytenr));
			}
		}

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + size_diff, btrfs_leaf_data(leaf) +
			      data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
		if (slot == 0)
			fixup_low_keys(root, path, &disk_key, 1);
	}

	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
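/*
 * Worked example (not part of the original file): truncating a 100 byte
 * item to 60 bytes.  With from_end == 1 the last 40 data bytes are simply
 * cut off.  With from_end == 0 the first 40 bytes are dropped instead, the
 * remaining data is shifted, and the item key's offset is advanced by
 * size_diff (40) so the key still describes the data that survived.
 */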
/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff - data_size, &token);
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);

	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
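/*
 * Illustrative usage sketch (not part of the original file; the keys and
 * sizes are hypothetical).  Inserting two empty items in one pass via the
 * btrfs_insert_empty_items() wrapper below, which computes total_data and
 * total_size and then calls setup_items_for_insert():
 *
 *	struct btrfs_key keys[2];	// must be in ascending key order
 *	u32 sizes[2] = { 32, 64 };
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
 *	// on success, path->slots[0] points at the first of the two items
 */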
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(root, path, cpu_key, data_size,
			       total_data, total_size, nr);
	return 0;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
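
/*
 * Usage sketch for btrfs_insert_item() (illustrative only; the key type
 * and payload below are hypothetical, and error handling is elided):
 *
 *	struct btrfs_key key;
 *	u64 payload = 0;
 *	int ret;
 *
 *	key.objectid = objectid;
 *	key.type = BTRFS_EXAMPLE_ITEM_KEY;	(hypothetical key type)
 *	key.offset = 0;
 *	ret = btrfs_insert_item(trans, root, &key, &payload,
 *				sizeof(payload));
 *
 * Since the helper allocates and frees its own path, it suits callers
 * that don't need the slot afterwards; btrfs_insert_empty_item() plus
 * write_extent_buffer() is the route when they do.
 */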
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level)
			tree_mod_log_eb_move(root->fs_info, parent, slot,
					     slot + 1, nritems - slot - 1);
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
					      MOD_LOG_KEY_REMOVE);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(root, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(root, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
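
/*
 * Usage sketch (illustrative only): deleting the item a search just
 * found, error handling elided. The ins_len of -1 asks btrfs_search_slot
 * to pre-balance the path for a deletion, which is the "previously
 * balanced" state that del_ptr() above depends on:
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret == 0)
 *		ret = btrfs_del_items(trans, root, path,
 *				      path->slots[0], 1);
 *	btrfs_release_path(path);
 */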
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						  min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1, 0, NULL);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	if (ret == 0) {
		memcpy(min_key, &found_key, sizeof(found_key));
		btrfs_set_path_blocking(path);
	}
	return ret;
}
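
/*
 * Usage sketch (illustrative only, in the style of the defrag and
 * tree-log callers; the key stepping below is hypothetical):
 *
 *	path->keep_locks = 1;
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, &max_key,
 *					   path, min_trans);
 *		if (ret)	(ret is < 0 on error, 1 when nothing left)
 *			break;
 *		... process the node or item now described by min_key ...
 *		btrfs_release_path(path);
 *		min_key.offset++;	(advance past the key just found)
 *	}
 */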
static void tree_move_down(struct btrfs_root *root,
			   struct btrfs_path *path,
			   int *level, int root_level)
{
	BUG_ON(*level == 0);
	path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
					path->slots[*level]);
	path->slots[*level - 1] = 0;
	(*level)--;
}
static int tree_move_next_or_upnext(struct btrfs_root *root,
				    struct btrfs_path *path,
				    int *level, int root_level)
{
	int ret = 0;
	int nritems;
	nritems = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	while (path->slots[*level] >= nritems) {
		if (*level == root_level)
			return -1;

		/* move upnext */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		nritems = btrfs_header_nritems(path->nodes[*level]);
		ret = 1;
	}
	return ret;
}
/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
 */
static int tree_advance(struct btrfs_root *root,
			struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key)
{
	int ret;

	if (*level == 0 || !allow_down) {
		ret = tree_move_next_or_upnext(root, path, level, root_level);
	} else {
		tree_move_down(root, path, level, root_level);
		ret = 0;
	}
	if (ret >= 0) {
		if (*level == 0)
			btrfs_item_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
		else
			btrfs_node_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
	}
	return ret;
}
static int tree_compare_item(struct btrfs_root *left_root,
			     struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	int cmp;
	int len1, len2;
	unsigned long off1, off2;

	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;

	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
				right_path->slots[0]);

	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
}
#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1
/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t changed_cb, void *ctx)
{
	int ret;
	int cmp;
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached;
	int right_end_reached;
	int advance_left;
	int advance_right;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_start_ctransid;
	u64 right_start_ctransid;
	u64 ctransid;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto out;
	}

	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;

	spin_lock(&left_root->root_item_lock);
	left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
	spin_unlock(&left_root->root_item_lock);

	spin_lock(&right_root->root_item_lock);
	right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
	spin_unlock(&right_root->root_item_lock);

	trans = btrfs_join_transaction(left_root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leaves
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leaves
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we found a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   level deeper.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */

	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	left_path->nodes[left_level] = left_root->commit_root;
	extent_buffer_get(left_path->nodes[left_level]);

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] = right_root->commit_root;
	extent_buffer_get(right_path->nodes[right_level]);

	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);

	left_end_reached = right_end_reached = 0;
	advance_left = advance_right = 0;

	while (1) {
		/*
		 * We need to make sure the transaction does not get committed
		 * while we do anything on commit roots. This means, we need to
		 * join and leave transactions for every item that we process.
		 */
		if (trans && btrfs_should_end_transaction(trans, left_root)) {
			btrfs_release_path(left_path);
			btrfs_release_path(right_path);

			ret = btrfs_end_transaction(trans, left_root);
			trans = NULL;
			if (ret < 0)
				goto out;
		}
		/* now rejoin the transaction */
		if (!trans) {
			trans = btrfs_join_transaction(left_root);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				trans = NULL;
				goto out;
			}

			spin_lock(&left_root->root_item_lock);
			ctransid = btrfs_root_ctransid(&left_root->root_item);
			spin_unlock(&left_root->root_item_lock);
			if (ctransid != left_start_ctransid)
				left_start_ctransid = 0;

			spin_lock(&right_root->root_item_lock);
			ctransid = btrfs_root_ctransid(&right_root->root_item);
			spin_unlock(&right_root->root_item_lock);
			if (ctransid != right_start_ctransid)
				right_start_ctransid = 0;

			if (!left_start_ctransid || !right_start_ctransid) {
				WARN(1, KERN_WARNING
					"btrfs: btrfs_compare_tree detected "
					"a change in one of the trees while "
					"iterating. This is probably a "
					"bug.\n");
				ret = -EIO;
				goto out;
			}

			/*
			 * the commit root may have changed, so start again
			 * where we stopped
			 */
			left_path->lowest_level = left_level;
			right_path->lowest_level = right_level;
			ret = btrfs_search_slot(NULL, left_root,
					&left_key, left_path, 0, 0);
			if (ret < 0)
				goto out;
			ret = btrfs_search_slot(NULL, right_root,
					&right_key, right_path, 0, 0);
			if (ret < 0)
				goto out;
		}

		if (advance_left && !left_end_reached) {
			ret = tree_advance(left_root, left_path, &left_level,
					left_root_level,
					advance_left != ADVANCE_ONLY_NEXT,
					&left_key);
			if (ret < 0)
				left_end_reached = ADVANCE;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(right_root, right_path, &right_level,
					right_root_level,
					advance_right != ADVANCE_ONLY_NEXT,
					&right_key);
			if (ret < 0)
				right_end_reached = ADVANCE;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out;
		} else if (left_end_reached) {
			if (right_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			if (left_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
				advance_right = ADVANCE;
			} else {
				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
				ret = tree_compare_item(left_root, left_path,
						right_path, tmp_buf);
				if (ret) {
					WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
					ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_CHANGED,
						ctx);
					if (ret < 0)
						goto out;
				}
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr) {
					/*
					 * As we're on a shared block, don't
					 * allow to go deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kfree(tmp_buf);

	if (trans) {
		if (!ret)
			ret = btrfs_end_transaction(trans, left_root);
		else
			btrfs_end_transaction(trans, left_root);
	}

	return ret;
}
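
/*
 * Callback sketch (illustrative only; the function name is hypothetical
 * and the body is a stub). It mirrors the btrfs_changed_cb_t shape that
 * the send code implements; returning a negative value aborts the
 * compare via the "if (ret < 0) goto out" checks above:
 *
 *	static int example_changed_cb(struct btrfs_root *left_root,
 *				      struct btrfs_root *right_root,
 *				      struct btrfs_path *left_path,
 *				      struct btrfs_path *right_path,
 *				      struct btrfs_key *key,
 *				      enum btrfs_compare_tree_result result,
 *				      void *ctx)
 *	{
 *		(result is BTRFS_COMPARE_TREE_NEW, _DELETED or _CHANGED;
 *		 both paths point into commit roots, so read-only access)
 *		return 0;
 *	}
 */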
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
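
/*
 * Usage sketch (illustrative only): the canonical forward scan over a
 * tree, calling btrfs_next_leaf() whenever the current leaf is spent.
 * Declarations and the initial btrfs_search_slot are assumed:
 *
 *	while (1) {
 *		leaf = path->nodes[0];
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret < 0)
 *				goto out;	(io error)
 *			if (ret > 0)
 *				break;		(no more leaves)
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 *		... process the item at path->slots[0] ...
 *		path->slots[0]++;
 *	}
 */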
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)