/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;

	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
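/*
 * Illustrative usage sketch for the two helpers above (not copied from any
 * particular caller): a path is allocated, used for searches and then
 * released and freed in one call.
 *
 *	struct btrfs_path *path;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	...
 *	btrfs_free_path(path);
 */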
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
	    list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root,
		      struct extent_buffer *buf,
		      struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
				     new_root_objectid, &disk_key, level,
				     buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);

	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 index;		/* shifted logical */
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should make sure that elem->seq is zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}
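/*
 * Illustrative usage sketch (not a verbatim caller): a reader that wants a
 * stable view of tree modifications registers a blocker around its walk,
 * starting from a zeroed elem->seq as required above:
 *
 *	struct seq_list elem = {};
 *
 *	btrfs_get_tree_mod_seq(fs_info, &elem);
 *	... use elem.seq as the time_seq for tree mod log lookups ...
 *	btrfs_put_tree_mod_seq(fs_info, &elem);
 */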
389 void btrfs_put_tree_mod_seq(struct btrfs_fs_info
*fs_info
,
390 struct seq_list
*elem
)
392 struct rb_root
*tm_root
;
393 struct rb_node
*node
;
394 struct rb_node
*next
;
395 struct seq_list
*cur_elem
;
396 struct tree_mod_elem
*tm
;
397 u64 min_seq
= (u64
)-1;
398 u64 seq_putting
= elem
->seq
;
403 spin_lock(&fs_info
->tree_mod_seq_lock
);
404 list_del(&elem
->list
);
407 list_for_each_entry(cur_elem
, &fs_info
->tree_mod_seq_list
, list
) {
408 if (cur_elem
->seq
< min_seq
) {
409 if (seq_putting
> cur_elem
->seq
) {
411 * blocker with lower sequence number exists, we
412 * cannot remove anything from the log
414 spin_unlock(&fs_info
->tree_mod_seq_lock
);
417 min_seq
= cur_elem
->seq
;
420 spin_unlock(&fs_info
->tree_mod_seq_lock
);
423 * anything that's lower than the lowest existing (read: blocked)
424 * sequence number can be removed from the tree.
426 tree_mod_log_write_lock(fs_info
);
427 tm_root
= &fs_info
->tree_mod_log
;
428 for (node
= rb_first(tm_root
); node
; node
= next
) {
429 next
= rb_next(node
);
430 tm
= container_of(node
, struct tree_mod_elem
, node
);
431 if (tm
->seq
> min_seq
)
433 rb_erase(node
, tm_root
);
436 tree_mod_log_write_unlock(fs_info
);
/*
 * key order of the log:
 *       index -> sequence
 *
 * the index is the shifted logical of the *new* root node for root replace
 * operations, or the shifted logical of the affected block for all other
 * operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
450 __tree_mod_log_insert(struct btrfs_fs_info
*fs_info
, struct tree_mod_elem
*tm
)
452 struct rb_root
*tm_root
;
453 struct rb_node
**new;
454 struct rb_node
*parent
= NULL
;
455 struct tree_mod_elem
*cur
;
459 tm
->seq
= btrfs_inc_tree_mod_seq(fs_info
);
461 tm_root
= &fs_info
->tree_mod_log
;
462 new = &tm_root
->rb_node
;
464 cur
= container_of(*new, struct tree_mod_elem
, node
);
466 if (cur
->index
< tm
->index
)
467 new = &((*new)->rb_left
);
468 else if (cur
->index
> tm
->index
)
469 new = &((*new)->rb_right
);
470 else if (cur
->seq
< tm
->seq
)
471 new = &((*new)->rb_left
);
472 else if (cur
->seq
> tm
->seq
)
473 new = &((*new)->rb_right
);
478 rb_link_node(&tm
->node
, parent
, new);
479 rb_insert_color(&tm
->node
, tm_root
);
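/*
 * Note on the ordering used above: tree mod log entries are keyed first by
 * tm->index (the shifted logical of the affected block) and then by
 * tm->seq, so all modifications of a single block are neighbours in the
 * rb tree and can be visited in sequence order when rewinding.
 */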
484 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
485 * returns zero with the tree_mod_log_lock acquired. The caller must hold
486 * this until all tree mod log insertions are recorded in the rb tree and then
487 * call tree_mod_log_write_unlock() to release.
489 static inline int tree_mod_dont_log(struct btrfs_fs_info
*fs_info
,
490 struct extent_buffer
*eb
) {
492 if (list_empty(&(fs_info
)->tree_mod_seq_list
))
494 if (eb
&& btrfs_header_level(eb
) == 0)
497 tree_mod_log_write_lock(fs_info
);
498 if (list_empty(&(fs_info
)->tree_mod_seq_list
)) {
499 tree_mod_log_write_unlock(fs_info
);
506 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
507 static inline int tree_mod_need_log(const struct btrfs_fs_info
*fs_info
,
508 struct extent_buffer
*eb
)
511 if (list_empty(&(fs_info
)->tree_mod_seq_list
))
513 if (eb
&& btrfs_header_level(eb
) == 0)
519 static struct tree_mod_elem
*
520 alloc_tree_mod_elem(struct extent_buffer
*eb
, int slot
,
521 enum mod_log_op op
, gfp_t flags
)
523 struct tree_mod_elem
*tm
;
525 tm
= kzalloc(sizeof(*tm
), flags
);
529 tm
->index
= eb
->start
>> PAGE_CACHE_SHIFT
;
530 if (op
!= MOD_LOG_KEY_ADD
) {
531 btrfs_node_key(eb
, &tm
->key
, slot
);
532 tm
->blockptr
= btrfs_node_blockptr(eb
, slot
);
536 tm
->generation
= btrfs_node_ptr_generation(eb
, slot
);
537 RB_CLEAR_NODE(&tm
->node
);
543 tree_mod_log_insert_key(struct btrfs_fs_info
*fs_info
,
544 struct extent_buffer
*eb
, int slot
,
545 enum mod_log_op op
, gfp_t flags
)
547 struct tree_mod_elem
*tm
;
550 if (!tree_mod_need_log(fs_info
, eb
))
553 tm
= alloc_tree_mod_elem(eb
, slot
, op
, flags
);
557 if (tree_mod_dont_log(fs_info
, eb
)) {
562 ret
= __tree_mod_log_insert(fs_info
, tm
);
563 tree_mod_log_write_unlock(fs_info
);
571 tree_mod_log_insert_move(struct btrfs_fs_info
*fs_info
,
572 struct extent_buffer
*eb
, int dst_slot
, int src_slot
,
573 int nr_items
, gfp_t flags
)
575 struct tree_mod_elem
*tm
= NULL
;
576 struct tree_mod_elem
**tm_list
= NULL
;
581 if (!tree_mod_need_log(fs_info
, eb
))
584 tm_list
= kzalloc(nr_items
* sizeof(struct tree_mod_elem
*), flags
);
588 tm
= kzalloc(sizeof(*tm
), flags
);
594 tm
->index
= eb
->start
>> PAGE_CACHE_SHIFT
;
596 tm
->move
.dst_slot
= dst_slot
;
597 tm
->move
.nr_items
= nr_items
;
598 tm
->op
= MOD_LOG_MOVE_KEYS
;
600 for (i
= 0; i
+ dst_slot
< src_slot
&& i
< nr_items
; i
++) {
601 tm_list
[i
] = alloc_tree_mod_elem(eb
, i
+ dst_slot
,
602 MOD_LOG_KEY_REMOVE_WHILE_MOVING
, flags
);
609 if (tree_mod_dont_log(fs_info
, eb
))
* When we overwrite something during the move, we log these removals.
615 * This can only happen when we move towards the beginning of the
616 * buffer, i.e. dst_slot < src_slot.
618 for (i
= 0; i
+ dst_slot
< src_slot
&& i
< nr_items
; i
++) {
619 ret
= __tree_mod_log_insert(fs_info
, tm_list
[i
]);
624 ret
= __tree_mod_log_insert(fs_info
, tm
);
627 tree_mod_log_write_unlock(fs_info
);
632 for (i
= 0; i
< nr_items
; i
++) {
633 if (tm_list
[i
] && !RB_EMPTY_NODE(&tm_list
[i
]->node
))
634 rb_erase(&tm_list
[i
]->node
, &fs_info
->tree_mod_log
);
638 tree_mod_log_write_unlock(fs_info
);
646 __tree_mod_log_free_eb(struct btrfs_fs_info
*fs_info
,
647 struct tree_mod_elem
**tm_list
,
653 for (i
= nritems
- 1; i
>= 0; i
--) {
654 ret
= __tree_mod_log_insert(fs_info
, tm_list
[i
]);
656 for (j
= nritems
- 1; j
> i
; j
--)
657 rb_erase(&tm_list
[j
]->node
,
658 &fs_info
->tree_mod_log
);
667 tree_mod_log_insert_root(struct btrfs_fs_info
*fs_info
,
668 struct extent_buffer
*old_root
,
669 struct extent_buffer
*new_root
, gfp_t flags
,
672 struct tree_mod_elem
*tm
= NULL
;
673 struct tree_mod_elem
**tm_list
= NULL
;
678 if (!tree_mod_need_log(fs_info
, NULL
))
681 if (log_removal
&& btrfs_header_level(old_root
) > 0) {
682 nritems
= btrfs_header_nritems(old_root
);
683 tm_list
= kzalloc(nritems
* sizeof(struct tree_mod_elem
*),
689 for (i
= 0; i
< nritems
; i
++) {
690 tm_list
[i
] = alloc_tree_mod_elem(old_root
, i
,
691 MOD_LOG_KEY_REMOVE_WHILE_FREEING
, flags
);
699 tm
= kzalloc(sizeof(*tm
), flags
);
705 tm
->index
= new_root
->start
>> PAGE_CACHE_SHIFT
;
706 tm
->old_root
.logical
= old_root
->start
;
707 tm
->old_root
.level
= btrfs_header_level(old_root
);
708 tm
->generation
= btrfs_header_generation(old_root
);
709 tm
->op
= MOD_LOG_ROOT_REPLACE
;
711 if (tree_mod_dont_log(fs_info
, NULL
))
715 ret
= __tree_mod_log_free_eb(fs_info
, tm_list
, nritems
);
717 ret
= __tree_mod_log_insert(fs_info
, tm
);
719 tree_mod_log_write_unlock(fs_info
);
728 for (i
= 0; i
< nritems
; i
++)
737 static struct tree_mod_elem
*
738 __tree_mod_log_search(struct btrfs_fs_info
*fs_info
, u64 start
, u64 min_seq
,
741 struct rb_root
*tm_root
;
742 struct rb_node
*node
;
743 struct tree_mod_elem
*cur
= NULL
;
744 struct tree_mod_elem
*found
= NULL
;
745 u64 index
= start
>> PAGE_CACHE_SHIFT
;
747 tree_mod_log_read_lock(fs_info
);
748 tm_root
= &fs_info
->tree_mod_log
;
749 node
= tm_root
->rb_node
;
751 cur
= container_of(node
, struct tree_mod_elem
, node
);
752 if (cur
->index
< index
) {
753 node
= node
->rb_left
;
754 } else if (cur
->index
> index
) {
755 node
= node
->rb_right
;
756 } else if (cur
->seq
< min_seq
) {
757 node
= node
->rb_left
;
758 } else if (!smallest
) {
759 /* we want the node with the highest seq */
761 BUG_ON(found
->seq
> cur
->seq
);
763 node
= node
->rb_left
;
764 } else if (cur
->seq
> min_seq
) {
765 /* we want the node with the smallest seq */
767 BUG_ON(found
->seq
< cur
->seq
);
769 node
= node
->rb_right
;
775 tree_mod_log_read_unlock(fs_info
);
781 * this returns the element from the log with the smallest time sequence
782 * value that's in the log (the oldest log item). any element with a time
783 * sequence lower than min_seq will be ignored.
785 static struct tree_mod_elem
*
786 tree_mod_log_search_oldest(struct btrfs_fs_info
*fs_info
, u64 start
,
789 return __tree_mod_log_search(fs_info
, start
, min_seq
, 1);
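/*
 * The trailing argument of __tree_mod_log_search() picks the direction:
 * 1 asks for the element with the smallest (oldest) sequence number that is
 * still >= min_seq, while 0, used by tree_mod_log_search() below, asks for
 * the largest (most recent) one.
 */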
793 * this returns the element from the log with the largest time sequence
794 * value that's in the log (the most recent log item). any element with
795 * a time sequence lower than min_seq will be ignored.
797 static struct tree_mod_elem
*
798 tree_mod_log_search(struct btrfs_fs_info
*fs_info
, u64 start
, u64 min_seq
)
800 return __tree_mod_log_search(fs_info
, start
, min_seq
, 0);
804 tree_mod_log_eb_copy(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*dst
,
805 struct extent_buffer
*src
, unsigned long dst_offset
,
806 unsigned long src_offset
, int nr_items
)
809 struct tree_mod_elem
**tm_list
= NULL
;
810 struct tree_mod_elem
**tm_list_add
, **tm_list_rem
;
814 if (!tree_mod_need_log(fs_info
, NULL
))
817 if (btrfs_header_level(dst
) == 0 && btrfs_header_level(src
) == 0)
820 tm_list
= kzalloc(nr_items
* 2 * sizeof(struct tree_mod_elem
*),
825 tm_list_add
= tm_list
;
826 tm_list_rem
= tm_list
+ nr_items
;
827 for (i
= 0; i
< nr_items
; i
++) {
828 tm_list_rem
[i
] = alloc_tree_mod_elem(src
, i
+ src_offset
,
829 MOD_LOG_KEY_REMOVE
, GFP_NOFS
);
830 if (!tm_list_rem
[i
]) {
835 tm_list_add
[i
] = alloc_tree_mod_elem(dst
, i
+ dst_offset
,
836 MOD_LOG_KEY_ADD
, GFP_NOFS
);
837 if (!tm_list_add
[i
]) {
843 if (tree_mod_dont_log(fs_info
, NULL
))
847 for (i
= 0; i
< nr_items
; i
++) {
848 ret
= __tree_mod_log_insert(fs_info
, tm_list_rem
[i
]);
851 ret
= __tree_mod_log_insert(fs_info
, tm_list_add
[i
]);
856 tree_mod_log_write_unlock(fs_info
);
862 for (i
= 0; i
< nr_items
* 2; i
++) {
863 if (tm_list
[i
] && !RB_EMPTY_NODE(&tm_list
[i
]->node
))
864 rb_erase(&tm_list
[i
]->node
, &fs_info
->tree_mod_log
);
868 tree_mod_log_write_unlock(fs_info
);
875 tree_mod_log_eb_move(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*dst
,
876 int dst_offset
, int src_offset
, int nr_items
)
879 ret
= tree_mod_log_insert_move(fs_info
, dst
, dst_offset
, src_offset
,
885 tree_mod_log_set_node_key(struct btrfs_fs_info
*fs_info
,
886 struct extent_buffer
*eb
, int slot
, int atomic
)
890 ret
= tree_mod_log_insert_key(fs_info
, eb
, slot
,
892 atomic
? GFP_ATOMIC
: GFP_NOFS
);
897 tree_mod_log_free_eb(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*eb
)
899 struct tree_mod_elem
**tm_list
= NULL
;
904 if (btrfs_header_level(eb
) == 0)
907 if (!tree_mod_need_log(fs_info
, NULL
))
910 nritems
= btrfs_header_nritems(eb
);
911 tm_list
= kzalloc(nritems
* sizeof(struct tree_mod_elem
*),
916 for (i
= 0; i
< nritems
; i
++) {
917 tm_list
[i
] = alloc_tree_mod_elem(eb
, i
,
918 MOD_LOG_KEY_REMOVE_WHILE_FREEING
, GFP_NOFS
);
925 if (tree_mod_dont_log(fs_info
, eb
))
928 ret
= __tree_mod_log_free_eb(fs_info
, tm_list
, nritems
);
929 tree_mod_log_write_unlock(fs_info
);
937 for (i
= 0; i
< nritems
; i
++)
945 tree_mod_log_set_root_pointer(struct btrfs_root
*root
,
946 struct extent_buffer
*new_root_node
,
950 ret
= tree_mod_log_insert_root(root
->fs_info
, root
->node
,
951 new_root_node
, GFP_NOFS
, log_removal
);
956 * check if the tree block can be shared by multiple trees
958 int btrfs_block_can_be_shared(struct btrfs_root
*root
,
959 struct extent_buffer
*buf
)
* Tree blocks not in reference counted trees and tree roots
963 * are never shared. If a block was allocated after the last
964 * snapshot and the block was not allocated by tree relocation,
965 * we know the block is not shared.
967 if (test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
) &&
968 buf
!= root
->node
&& buf
!= root
->commit_root
&&
969 (btrfs_header_generation(buf
) <=
970 btrfs_root_last_snapshot(&root
->root_item
) ||
971 btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_RELOC
)))
973 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
974 if (test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
) &&
975 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
981 static noinline
int update_ref_for_cow(struct btrfs_trans_handle
*trans
,
982 struct btrfs_root
*root
,
983 struct extent_buffer
*buf
,
984 struct extent_buffer
*cow
,
994 * Backrefs update rules:
996 * Always use full backrefs for extent pointers in tree block
997 * allocated by tree relocation.
999 * If a shared tree block is no longer referenced by its owner
1000 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
1001 * use full backrefs for extent pointers in tree block.
* If a tree block is being relocated
* (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
* use full backrefs for extent pointers in the tree block.
* The reason for this is that some operations (such as drop tree)
* are only allowed for blocks that use full backrefs.
1010 if (btrfs_block_can_be_shared(root
, buf
)) {
1011 ret
= btrfs_lookup_extent_info(trans
, root
, buf
->start
,
1012 btrfs_header_level(buf
), 1,
1018 btrfs_std_error(root
->fs_info
, ret
);
1023 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
1024 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
1025 flags
= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
1030 owner
= btrfs_header_owner(buf
);
1031 BUG_ON(owner
== BTRFS_TREE_RELOC_OBJECTID
&&
1032 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
));
1035 if ((owner
== root
->root_key
.objectid
||
1036 root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) &&
1037 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
)) {
1038 ret
= btrfs_inc_ref(trans
, root
, buf
, 1);
1039 BUG_ON(ret
); /* -ENOMEM */
1041 if (root
->root_key
.objectid
==
1042 BTRFS_TREE_RELOC_OBJECTID
) {
1043 ret
= btrfs_dec_ref(trans
, root
, buf
, 0);
1044 BUG_ON(ret
); /* -ENOMEM */
1045 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
1046 BUG_ON(ret
); /* -ENOMEM */
1048 new_flags
|= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
1051 if (root
->root_key
.objectid
==
1052 BTRFS_TREE_RELOC_OBJECTID
)
1053 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
1055 ret
= btrfs_inc_ref(trans
, root
, cow
, 0);
1056 BUG_ON(ret
); /* -ENOMEM */
1058 if (new_flags
!= 0) {
1059 int level
= btrfs_header_level(buf
);
1061 ret
= btrfs_set_disk_extent_flags(trans
, root
,
1064 new_flags
, level
, 0);
1069 if (flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
) {
1070 if (root
->root_key
.objectid
==
1071 BTRFS_TREE_RELOC_OBJECTID
)
1072 ret
= btrfs_inc_ref(trans
, root
, cow
, 1);
1074 ret
= btrfs_inc_ref(trans
, root
, cow
, 0);
1075 BUG_ON(ret
); /* -ENOMEM */
1076 ret
= btrfs_dec_ref(trans
, root
, buf
, 1);
1077 BUG_ON(ret
); /* -ENOMEM */
1079 clean_tree_block(trans
, root
, buf
);
* does the dirty work in cow of a single block.  The parent block (if
* supplied) is updated to point to the new cow copy.  The new buffer is marked
* dirty and returned locked.  If you modify the block it needs to be marked
* dirty again.
*
* search_start -- an allocation hint for the new block
*
* empty_size -- a hint that you plan on doing more cow.  This is the size in
* bytes the allocator should try to find free next to the block it returns.
* This is just a hint and may be ignored by the allocator.
1097 static noinline
int __btrfs_cow_block(struct btrfs_trans_handle
*trans
,
1098 struct btrfs_root
*root
,
1099 struct extent_buffer
*buf
,
1100 struct extent_buffer
*parent
, int parent_slot
,
1101 struct extent_buffer
**cow_ret
,
1102 u64 search_start
, u64 empty_size
)
1104 struct btrfs_disk_key disk_key
;
1105 struct extent_buffer
*cow
;
1108 int unlock_orig
= 0;
1111 if (*cow_ret
== buf
)
1114 btrfs_assert_tree_locked(buf
);
1116 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
) &&
1117 trans
->transid
!= root
->fs_info
->running_transaction
->transid
);
1118 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
) &&
1119 trans
->transid
!= root
->last_trans
);
1121 level
= btrfs_header_level(buf
);
1124 btrfs_item_key(buf
, &disk_key
, 0);
1126 btrfs_node_key(buf
, &disk_key
, 0);
1128 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) {
1130 parent_start
= parent
->start
;
1136 cow
= btrfs_alloc_free_block(trans
, root
, buf
->len
, parent_start
,
1137 root
->root_key
.objectid
, &disk_key
,
1138 level
, search_start
, empty_size
);
1140 return PTR_ERR(cow
);
1142 /* cow is set to blocking by btrfs_init_new_buffer */
1144 copy_extent_buffer(cow
, buf
, 0, 0, cow
->len
);
1145 btrfs_set_header_bytenr(cow
, cow
->start
);
1146 btrfs_set_header_generation(cow
, trans
->transid
);
1147 btrfs_set_header_backref_rev(cow
, BTRFS_MIXED_BACKREF_REV
);
1148 btrfs_clear_header_flag(cow
, BTRFS_HEADER_FLAG_WRITTEN
|
1149 BTRFS_HEADER_FLAG_RELOC
);
1150 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
)
1151 btrfs_set_header_flag(cow
, BTRFS_HEADER_FLAG_RELOC
);
1153 btrfs_set_header_owner(cow
, root
->root_key
.objectid
);
1155 write_extent_buffer(cow
, root
->fs_info
->fsid
, btrfs_header_fsid(),
1158 ret
= update_ref_for_cow(trans
, root
, buf
, cow
, &last_ref
);
1160 btrfs_abort_transaction(trans
, root
, ret
);
1164 if (test_bit(BTRFS_ROOT_REF_COWS
, &root
->state
)) {
1165 ret
= btrfs_reloc_cow_block(trans
, root
, buf
, cow
);
1170 if (buf
== root
->node
) {
1171 WARN_ON(parent
&& parent
!= buf
);
1172 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
1173 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
1174 parent_start
= buf
->start
;
1178 extent_buffer_get(cow
);
1179 tree_mod_log_set_root_pointer(root
, cow
, 1);
1180 rcu_assign_pointer(root
->node
, cow
);
1182 btrfs_free_tree_block(trans
, root
, buf
, parent_start
,
1184 free_extent_buffer(buf
);
1185 add_root_to_dirty_list(root
);
1187 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
)
1188 parent_start
= parent
->start
;
1192 WARN_ON(trans
->transid
!= btrfs_header_generation(parent
));
1193 tree_mod_log_insert_key(root
->fs_info
, parent
, parent_slot
,
1194 MOD_LOG_KEY_REPLACE
, GFP_NOFS
);
1195 btrfs_set_node_blockptr(parent
, parent_slot
,
1197 btrfs_set_node_ptr_generation(parent
, parent_slot
,
1199 btrfs_mark_buffer_dirty(parent
);
1201 ret
= tree_mod_log_free_eb(root
->fs_info
, buf
);
1203 btrfs_abort_transaction(trans
, root
, ret
);
1207 btrfs_free_tree_block(trans
, root
, buf
, parent_start
,
1211 btrfs_tree_unlock(buf
);
1212 free_extent_buffer_stale(buf
);
1213 btrfs_mark_buffer_dirty(cow
);
1219 * returns the logical address of the oldest predecessor of the given root.
1220 * entries older than time_seq are ignored.
1222 static struct tree_mod_elem
*
1223 __tree_mod_log_oldest_root(struct btrfs_fs_info
*fs_info
,
1224 struct extent_buffer
*eb_root
, u64 time_seq
)
1226 struct tree_mod_elem
*tm
;
1227 struct tree_mod_elem
*found
= NULL
;
1228 u64 root_logical
= eb_root
->start
;
1235 * the very last operation that's logged for a root is the replacement
1236 * operation (if it is replaced at all). this has the index of the *new*
1237 * root, making it the very first operation that's logged for this root.
1240 tm
= tree_mod_log_search_oldest(fs_info
, root_logical
,
* if there are no tree operations for the oldest root, we simply
1246 * return it. this should only happen if that (old) root is at
1253 * if there's an operation that's not a root replacement, we
1254 * found the oldest version of our root. normally, we'll find a
1255 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1257 if (tm
->op
!= MOD_LOG_ROOT_REPLACE
)
1261 root_logical
= tm
->old_root
.logical
;
1265 /* if there's no old root to return, return what we found instead */
1273 * tm is a pointer to the first operation to rewind within eb. then, all
* previous operations will be rewound (until we reach something older than
1278 __tree_mod_log_rewind(struct btrfs_fs_info
*fs_info
, struct extent_buffer
*eb
,
1279 u64 time_seq
, struct tree_mod_elem
*first_tm
)
1282 struct rb_node
*next
;
1283 struct tree_mod_elem
*tm
= first_tm
;
1284 unsigned long o_dst
;
1285 unsigned long o_src
;
1286 unsigned long p_size
= sizeof(struct btrfs_key_ptr
);
1288 n
= btrfs_header_nritems(eb
);
1289 tree_mod_log_read_lock(fs_info
);
1290 while (tm
&& tm
->seq
>= time_seq
) {
1292 * all the operations are recorded with the operator used for
1293 * the modification. as we're going backwards, we do the
1294 * opposite of each operation here.
1297 case MOD_LOG_KEY_REMOVE_WHILE_FREEING
:
1298 BUG_ON(tm
->slot
< n
);
1300 case MOD_LOG_KEY_REMOVE_WHILE_MOVING
:
1301 case MOD_LOG_KEY_REMOVE
:
1302 btrfs_set_node_key(eb
, &tm
->key
, tm
->slot
);
1303 btrfs_set_node_blockptr(eb
, tm
->slot
, tm
->blockptr
);
1304 btrfs_set_node_ptr_generation(eb
, tm
->slot
,
1308 case MOD_LOG_KEY_REPLACE
:
1309 BUG_ON(tm
->slot
>= n
);
1310 btrfs_set_node_key(eb
, &tm
->key
, tm
->slot
);
1311 btrfs_set_node_blockptr(eb
, tm
->slot
, tm
->blockptr
);
1312 btrfs_set_node_ptr_generation(eb
, tm
->slot
,
1315 case MOD_LOG_KEY_ADD
:
1316 /* if a move operation is needed it's in the log */
1319 case MOD_LOG_MOVE_KEYS
:
1320 o_dst
= btrfs_node_key_ptr_offset(tm
->slot
);
1321 o_src
= btrfs_node_key_ptr_offset(tm
->move
.dst_slot
);
1322 memmove_extent_buffer(eb
, o_dst
, o_src
,
1323 tm
->move
.nr_items
* p_size
);
1325 case MOD_LOG_ROOT_REPLACE
:
1327 * this operation is special. for roots, this must be
1328 * handled explicitly before rewinding.
1329 * for non-roots, this operation may exist if the node
1330 * was a root: root A -> child B; then A gets empty and
1331 * B is promoted to the new root. in the mod log, we'll
1332 * have a root-replace operation for B, a tree block
* that is not a root. we simply ignore that operation.
1337 next
= rb_next(&tm
->node
);
1340 tm
= container_of(next
, struct tree_mod_elem
, node
);
1341 if (tm
->index
!= first_tm
->index
)
1344 tree_mod_log_read_unlock(fs_info
);
1345 btrfs_set_header_nritems(eb
, n
);
* Called with eb read locked. If the buffer cannot be rewound, the same buffer
1350 * is returned. If rewind operations happen, a fresh buffer is returned. The
1351 * returned buffer is always read-locked. If the returned buffer is not the
1352 * input buffer, the lock on the input buffer is released and the input buffer
1353 * is freed (its refcount is decremented).
1355 static struct extent_buffer
*
1356 tree_mod_log_rewind(struct btrfs_fs_info
*fs_info
, struct btrfs_path
*path
,
1357 struct extent_buffer
*eb
, u64 time_seq
)
1359 struct extent_buffer
*eb_rewin
;
1360 struct tree_mod_elem
*tm
;
1365 if (btrfs_header_level(eb
) == 0)
1368 tm
= tree_mod_log_search(fs_info
, eb
->start
, time_seq
);
1372 btrfs_set_path_blocking(path
);
1373 btrfs_set_lock_blocking_rw(eb
, BTRFS_READ_LOCK
);
1375 if (tm
->op
== MOD_LOG_KEY_REMOVE_WHILE_FREEING
) {
1376 BUG_ON(tm
->slot
!= 0);
1377 eb_rewin
= alloc_dummy_extent_buffer(eb
->start
,
1378 fs_info
->tree_root
->nodesize
);
1380 btrfs_tree_read_unlock_blocking(eb
);
1381 free_extent_buffer(eb
);
1384 btrfs_set_header_bytenr(eb_rewin
, eb
->start
);
1385 btrfs_set_header_backref_rev(eb_rewin
,
1386 btrfs_header_backref_rev(eb
));
1387 btrfs_set_header_owner(eb_rewin
, btrfs_header_owner(eb
));
1388 btrfs_set_header_level(eb_rewin
, btrfs_header_level(eb
));
1390 eb_rewin
= btrfs_clone_extent_buffer(eb
);
1392 btrfs_tree_read_unlock_blocking(eb
);
1393 free_extent_buffer(eb
);
1398 btrfs_clear_path_blocking(path
, NULL
, BTRFS_READ_LOCK
);
1399 btrfs_tree_read_unlock_blocking(eb
);
1400 free_extent_buffer(eb
);
1402 extent_buffer_get(eb_rewin
);
1403 btrfs_tree_read_lock(eb_rewin
);
1404 __tree_mod_log_rewind(fs_info
, eb_rewin
, time_seq
, tm
);
1405 WARN_ON(btrfs_header_nritems(eb_rewin
) >
1406 BTRFS_NODEPTRS_PER_BLOCK(fs_info
->tree_root
));
1412 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1413 * value. If there are no changes, the current root->root_node is returned. If
1414 * anything changed in between, there's a fresh buffer allocated on which the
1415 * rewind operations are done. In any case, the returned buffer is read locked.
1416 * Returns NULL on error (with no locks held).
1418 static inline struct extent_buffer
*
1419 get_old_root(struct btrfs_root
*root
, u64 time_seq
)
1421 struct tree_mod_elem
*tm
;
1422 struct extent_buffer
*eb
= NULL
;
1423 struct extent_buffer
*eb_root
;
1424 struct extent_buffer
*old
;
1425 struct tree_mod_root
*old_root
= NULL
;
1426 u64 old_generation
= 0;
1430 eb_root
= btrfs_read_lock_root_node(root
);
1431 tm
= __tree_mod_log_oldest_root(root
->fs_info
, eb_root
, time_seq
);
1435 if (tm
->op
== MOD_LOG_ROOT_REPLACE
) {
1436 old_root
= &tm
->old_root
;
1437 old_generation
= tm
->generation
;
1438 logical
= old_root
->logical
;
1440 logical
= eb_root
->start
;
1443 tm
= tree_mod_log_search(root
->fs_info
, logical
, time_seq
);
1444 if (old_root
&& tm
&& tm
->op
!= MOD_LOG_KEY_REMOVE_WHILE_FREEING
) {
1445 btrfs_tree_read_unlock(eb_root
);
1446 free_extent_buffer(eb_root
);
1447 blocksize
= root
->nodesize
;
1448 old
= read_tree_block(root
, logical
, blocksize
, 0);
1449 if (WARN_ON(!old
|| !extent_buffer_uptodate(old
))) {
1450 free_extent_buffer(old
);
1451 btrfs_warn(root
->fs_info
,
1452 "failed to read tree block %llu from get_old_root", logical
);
1454 eb
= btrfs_clone_extent_buffer(old
);
1455 free_extent_buffer(old
);
1457 } else if (old_root
) {
1458 btrfs_tree_read_unlock(eb_root
);
1459 free_extent_buffer(eb_root
);
1460 eb
= alloc_dummy_extent_buffer(logical
, root
->nodesize
);
1462 btrfs_set_lock_blocking_rw(eb_root
, BTRFS_READ_LOCK
);
1463 eb
= btrfs_clone_extent_buffer(eb_root
);
1464 btrfs_tree_read_unlock_blocking(eb_root
);
1465 free_extent_buffer(eb_root
);
1470 extent_buffer_get(eb
);
1471 btrfs_tree_read_lock(eb
);
1473 btrfs_set_header_bytenr(eb
, eb
->start
);
1474 btrfs_set_header_backref_rev(eb
, BTRFS_MIXED_BACKREF_REV
);
1475 btrfs_set_header_owner(eb
, btrfs_header_owner(eb_root
));
1476 btrfs_set_header_level(eb
, old_root
->level
);
1477 btrfs_set_header_generation(eb
, old_generation
);
1480 __tree_mod_log_rewind(root
->fs_info
, eb
, time_seq
, tm
);
1482 WARN_ON(btrfs_header_level(eb
) != 0);
1483 WARN_ON(btrfs_header_nritems(eb
) > BTRFS_NODEPTRS_PER_BLOCK(root
));
1488 int btrfs_old_root_level(struct btrfs_root
*root
, u64 time_seq
)
1490 struct tree_mod_elem
*tm
;
1492 struct extent_buffer
*eb_root
= btrfs_root_node(root
);
1494 tm
= __tree_mod_log_oldest_root(root
->fs_info
, eb_root
, time_seq
);
1495 if (tm
&& tm
->op
== MOD_LOG_ROOT_REPLACE
) {
1496 level
= tm
->old_root
.level
;
1498 level
= btrfs_header_level(eb_root
);
1500 free_extent_buffer(eb_root
);
1505 static inline int should_cow_block(struct btrfs_trans_handle
*trans
,
1506 struct btrfs_root
*root
,
1507 struct extent_buffer
*buf
)
1509 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
1510 if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT
, &root
->state
)))
1513 /* ensure we can see the force_cow */
1517 * We do not need to cow a block if
1518 * 1) this block is not created or changed in this transaction;
1519 * 2) this block does not belong to TREE_RELOC tree;
1520 * 3) the root is not forced COW.
1522 * What is forced COW:
* when we create a snapshot while committing the transaction,
* after we've finished copying the src root, we must COW the shared
* block to ensure metadata consistency.
1527 if (btrfs_header_generation(buf
) == trans
->transid
&&
1528 !btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_WRITTEN
) &&
1529 !(root
->root_key
.objectid
!= BTRFS_TREE_RELOC_OBJECTID
&&
1530 btrfs_header_flag(buf
, BTRFS_HEADER_FLAG_RELOC
)) &&
1531 !test_bit(BTRFS_ROOT_FORCE_COW
, &root
->state
))
1537 * cows a single block, see __btrfs_cow_block for the real work.
1538 * This version of it has extra checks so that a block isn't cow'd more than
1539 * once per transaction, as long as it hasn't been written yet
1541 noinline
int btrfs_cow_block(struct btrfs_trans_handle
*trans
,
1542 struct btrfs_root
*root
, struct extent_buffer
*buf
,
1543 struct extent_buffer
*parent
, int parent_slot
,
1544 struct extent_buffer
**cow_ret
)
1549 if (trans
->transaction
!= root
->fs_info
->running_transaction
)
1550 WARN(1, KERN_CRIT
"trans %llu running %llu\n",
1552 root
->fs_info
->running_transaction
->transid
);
1554 if (trans
->transid
!= root
->fs_info
->generation
)
1555 WARN(1, KERN_CRIT
"trans %llu running %llu\n",
1556 trans
->transid
, root
->fs_info
->generation
);
1558 if (!should_cow_block(trans
, root
, buf
)) {
1563 search_start
= buf
->start
& ~((u64
)(1024 * 1024 * 1024) - 1);
1566 btrfs_set_lock_blocking(parent
);
1567 btrfs_set_lock_blocking(buf
);
1569 ret
= __btrfs_cow_block(trans
, root
, buf
, parent
,
1570 parent_slot
, cow_ret
, search_start
, 0);
1572 trace_btrfs_cow_block(root
, buf
, *cow_ret
);
1578 * helper function for defrag to decide if two blocks pointed to by a
1579 * node are actually close by
1581 static int close_blocks(u64 blocknr
, u64 other
, u32 blocksize
)
1583 if (blocknr
< other
&& other
- (blocknr
+ blocksize
) < 32768)
1585 if (blocknr
> other
&& blocknr
- (other
+ blocksize
) < 32768)
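/*
 * In other words, two blocks count as close when the gap between them is
 * under 32KB. For example, with a 16KB blocksize, a block at offset 0 and
 * one at 40KB are close (24KB gap), while one at 80KB is not.
 */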
1591 * compare two keys in a memcmp fashion
1593 static int comp_keys(struct btrfs_disk_key
*disk
, struct btrfs_key
*k2
)
1595 struct btrfs_key k1
;
1597 btrfs_disk_key_to_cpu(&k1
, disk
);
1599 return btrfs_comp_cpu_keys(&k1
, k2
);
1603 * same as comp_keys only with two btrfs_key's
1605 int btrfs_comp_cpu_keys(struct btrfs_key
*k1
, struct btrfs_key
*k2
)
1607 if (k1
->objectid
> k2
->objectid
)
1609 if (k1
->objectid
< k2
->objectid
)
1611 if (k1
->type
> k2
->type
)
1613 if (k1
->type
< k2
->type
)
1615 if (k1
->offset
> k2
->offset
)
1617 if (k1
->offset
< k2
->offset
)
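/*
 * The comparison above orders keys by (objectid, type, offset) and follows
 * the memcmp convention: positive when k1 sorts after k2, negative when it
 * sorts before, zero when equal. For example, (256, BTRFS_INODE_ITEM_KEY, 0)
 * sorts before (256, BTRFS_INODE_REF_KEY, 0) because the item type value of
 * an inode item is smaller.
 */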
1623 * this is used by the defrag code to go through all the
1624 * leaves pointed to by a node and reallocate them so that
1625 * disk order is close to key order
1627 int btrfs_realloc_node(struct btrfs_trans_handle
*trans
,
1628 struct btrfs_root
*root
, struct extent_buffer
*parent
,
1629 int start_slot
, u64
*last_ret
,
1630 struct btrfs_key
*progress
)
1632 struct extent_buffer
*cur
;
1635 u64 search_start
= *last_ret
;
1645 int progress_passed
= 0;
1646 struct btrfs_disk_key disk_key
;
1648 parent_level
= btrfs_header_level(parent
);
1650 WARN_ON(trans
->transaction
!= root
->fs_info
->running_transaction
);
1651 WARN_ON(trans
->transid
!= root
->fs_info
->generation
);
1653 parent_nritems
= btrfs_header_nritems(parent
);
1654 blocksize
= root
->nodesize
;
1655 end_slot
= parent_nritems
;
1657 if (parent_nritems
== 1)
1660 btrfs_set_lock_blocking(parent
);
1662 for (i
= start_slot
; i
< end_slot
; i
++) {
1665 btrfs_node_key(parent
, &disk_key
, i
);
1666 if (!progress_passed
&& comp_keys(&disk_key
, progress
) < 0)
1669 progress_passed
= 1;
1670 blocknr
= btrfs_node_blockptr(parent
, i
);
1671 gen
= btrfs_node_ptr_generation(parent
, i
);
1672 if (last_block
== 0)
1673 last_block
= blocknr
;
1676 other
= btrfs_node_blockptr(parent
, i
- 1);
1677 close
= close_blocks(blocknr
, other
, blocksize
);
1679 if (!close
&& i
< end_slot
- 2) {
1680 other
= btrfs_node_blockptr(parent
, i
+ 1);
1681 close
= close_blocks(blocknr
, other
, blocksize
);
1684 last_block
= blocknr
;
1688 cur
= btrfs_find_tree_block(root
, blocknr
, blocksize
);
1690 uptodate
= btrfs_buffer_uptodate(cur
, gen
, 0);
1693 if (!cur
|| !uptodate
) {
1695 cur
= read_tree_block(root
, blocknr
,
1697 if (!cur
|| !extent_buffer_uptodate(cur
)) {
1698 free_extent_buffer(cur
);
1701 } else if (!uptodate
) {
1702 err
= btrfs_read_buffer(cur
, gen
);
1704 free_extent_buffer(cur
);
1709 if (search_start
== 0)
1710 search_start
= last_block
;
1712 btrfs_tree_lock(cur
);
1713 btrfs_set_lock_blocking(cur
);
1714 err
= __btrfs_cow_block(trans
, root
, cur
, parent
, i
,
1717 (end_slot
- i
) * blocksize
));
1719 btrfs_tree_unlock(cur
);
1720 free_extent_buffer(cur
);
1723 search_start
= cur
->start
;
1724 last_block
= cur
->start
;
1725 *last_ret
= search_start
;
1726 btrfs_tree_unlock(cur
);
1727 free_extent_buffer(cur
);
1733 * The leaf data grows from end-to-front in the node.
1734 * this returns the address of the start of the last item,
1735 * which is the stop of the leaf data stack
1737 static inline unsigned int leaf_data_end(struct btrfs_root
*root
,
1738 struct extent_buffer
*leaf
)
1740 u32 nr
= btrfs_header_nritems(leaf
);
1742 return BTRFS_LEAF_DATA_SIZE(root
);
1743 return btrfs_item_offset_nr(leaf
, nr
- 1);
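/*
 * Put differently: item headers grow forward from the start of a leaf while
 * the item data they point at grows backwards from the end, so the data
 * offset of the last item marks where the free space in the middle of the
 * leaf stops.
 */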
1748 * search for key in the extent_buffer. The items start at offset p,
1749 * and they are item_size apart. There are 'max' items in p.
1751 * the slot in the array is returned via slot, and it points to
1752 * the place where you would insert key if it is not found in
1755 * slot may point to max if the key is bigger than all of the keys
1757 static noinline
int generic_bin_search(struct extent_buffer
*eb
,
1759 int item_size
, struct btrfs_key
*key
,
1766 struct btrfs_disk_key
*tmp
= NULL
;
1767 struct btrfs_disk_key unaligned
;
1768 unsigned long offset
;
1770 unsigned long map_start
= 0;
1771 unsigned long map_len
= 0;
1774 while (low
< high
) {
1775 mid
= (low
+ high
) / 2;
1776 offset
= p
+ mid
* item_size
;
1778 if (!kaddr
|| offset
< map_start
||
1779 (offset
+ sizeof(struct btrfs_disk_key
)) >
1780 map_start
+ map_len
) {
1782 err
= map_private_extent_buffer(eb
, offset
,
1783 sizeof(struct btrfs_disk_key
),
1784 &kaddr
, &map_start
, &map_len
);
1787 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
1790 read_extent_buffer(eb
, &unaligned
,
1791 offset
, sizeof(unaligned
));
1796 tmp
= (struct btrfs_disk_key
*)(kaddr
+ offset
-
1799 ret
= comp_keys(tmp
, key
);
1815 * simple bin_search frontend that does the right thing for
1818 static int bin_search(struct extent_buffer
*eb
, struct btrfs_key
*key
,
1819 int level
, int *slot
)
1822 return generic_bin_search(eb
,
1823 offsetof(struct btrfs_leaf
, items
),
1824 sizeof(struct btrfs_item
),
1825 key
, btrfs_header_nritems(eb
),
1828 return generic_bin_search(eb
,
1829 offsetof(struct btrfs_node
, ptrs
),
1830 sizeof(struct btrfs_key_ptr
),
1831 key
, btrfs_header_nritems(eb
),
1835 int btrfs_bin_search(struct extent_buffer
*eb
, struct btrfs_key
*key
,
1836 int level
, int *slot
)
1838 return bin_search(eb
, key
, level
, slot
);
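/*
 * Illustrative example of the contract documented above generic_bin_search():
 * searching a block whose keys are { 10, 20, 30 } for key 20 finds it and
 * sets *slot to 1, while searching for key 25 reports "not found" and sets
 * *slot to 2, the position where 25 would have to be inserted.
 */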
1841 static void root_add_used(struct btrfs_root
*root
, u32 size
)
1843 spin_lock(&root
->accounting_lock
);
1844 btrfs_set_root_used(&root
->root_item
,
1845 btrfs_root_used(&root
->root_item
) + size
);
1846 spin_unlock(&root
->accounting_lock
);
1849 static void root_sub_used(struct btrfs_root
*root
, u32 size
)
1851 spin_lock(&root
->accounting_lock
);
1852 btrfs_set_root_used(&root
->root_item
,
1853 btrfs_root_used(&root
->root_item
) - size
);
1854 spin_unlock(&root
->accounting_lock
);
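/*
 * root_add_used() and root_sub_used() above keep the bytes-used counter of
 * the root item in sync as tree blocks are allocated and freed for this
 * root; accounting_lock only serializes this read-modify-write.
 */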
1857 /* given a node and slot number, this reads the blocks it points to. The
1858 * extent buffer is returned with a reference taken (but unlocked).
1859 * NULL is returned on error.
1861 static noinline
struct extent_buffer
*read_node_slot(struct btrfs_root
*root
,
1862 struct extent_buffer
*parent
, int slot
)
1864 int level
= btrfs_header_level(parent
);
1865 struct extent_buffer
*eb
;
1869 if (slot
>= btrfs_header_nritems(parent
))
1874 eb
= read_tree_block(root
, btrfs_node_blockptr(parent
, slot
),
1876 btrfs_node_ptr_generation(parent
, slot
));
1877 if (eb
&& !extent_buffer_uptodate(eb
)) {
1878 free_extent_buffer(eb
);
1886 * node level balancing, used to make sure nodes are in proper order for
1887 * item deletion. We balance from the top down, so we have to make sure
* that a deletion won't leave a node completely empty later on.
1890 static noinline
int balance_level(struct btrfs_trans_handle
*trans
,
1891 struct btrfs_root
*root
,
1892 struct btrfs_path
*path
, int level
)
1894 struct extent_buffer
*right
= NULL
;
1895 struct extent_buffer
*mid
;
1896 struct extent_buffer
*left
= NULL
;
1897 struct extent_buffer
*parent
= NULL
;
1901 int orig_slot
= path
->slots
[level
];
1907 mid
= path
->nodes
[level
];
1909 WARN_ON(path
->locks
[level
] != BTRFS_WRITE_LOCK
&&
1910 path
->locks
[level
] != BTRFS_WRITE_LOCK_BLOCKING
);
1911 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
1913 orig_ptr
= btrfs_node_blockptr(mid
, orig_slot
);
1915 if (level
< BTRFS_MAX_LEVEL
- 1) {
1916 parent
= path
->nodes
[level
+ 1];
1917 pslot
= path
->slots
[level
+ 1];
1921 * deal with the case where there is only one pointer in the root
1922 * by promoting the node below to a root
1925 struct extent_buffer
*child
;
1927 if (btrfs_header_nritems(mid
) != 1)
1930 /* promote the child to a root */
1931 child
= read_node_slot(root
, mid
, 0);
1934 btrfs_std_error(root
->fs_info
, ret
);
1938 btrfs_tree_lock(child
);
1939 btrfs_set_lock_blocking(child
);
1940 ret
= btrfs_cow_block(trans
, root
, child
, mid
, 0, &child
);
1942 btrfs_tree_unlock(child
);
1943 free_extent_buffer(child
);
1947 tree_mod_log_set_root_pointer(root
, child
, 1);
1948 rcu_assign_pointer(root
->node
, child
);
1950 add_root_to_dirty_list(root
);
1951 btrfs_tree_unlock(child
);
1953 path
->locks
[level
] = 0;
1954 path
->nodes
[level
] = NULL
;
1955 clean_tree_block(trans
, root
, mid
);
1956 btrfs_tree_unlock(mid
);
1957 /* once for the path */
1958 free_extent_buffer(mid
);
1960 root_sub_used(root
, mid
->len
);
1961 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
1962 /* once for the root ptr */
1963 free_extent_buffer_stale(mid
);
1966 if (btrfs_header_nritems(mid
) >
1967 BTRFS_NODEPTRS_PER_BLOCK(root
) / 4)
1970 left
= read_node_slot(root
, parent
, pslot
- 1);
1972 btrfs_tree_lock(left
);
1973 btrfs_set_lock_blocking(left
);
1974 wret
= btrfs_cow_block(trans
, root
, left
,
1975 parent
, pslot
- 1, &left
);
1981 right
= read_node_slot(root
, parent
, pslot
+ 1);
1983 btrfs_tree_lock(right
);
1984 btrfs_set_lock_blocking(right
);
1985 wret
= btrfs_cow_block(trans
, root
, right
,
1986 parent
, pslot
+ 1, &right
);
1993 /* first, try to make some room in the middle buffer */
1995 orig_slot
+= btrfs_header_nritems(left
);
1996 wret
= push_node_left(trans
, root
, left
, mid
, 1);
2002 * then try to empty the right most buffer into the middle
2005 wret
= push_node_left(trans
, root
, mid
, right
, 1);
2006 if (wret
< 0 && wret
!= -ENOSPC
)
2008 if (btrfs_header_nritems(right
) == 0) {
2009 clean_tree_block(trans
, root
, right
);
2010 btrfs_tree_unlock(right
);
2011 del_ptr(root
, path
, level
+ 1, pslot
+ 1);
2012 root_sub_used(root
, right
->len
);
2013 btrfs_free_tree_block(trans
, root
, right
, 0, 1);
2014 free_extent_buffer_stale(right
);
2017 struct btrfs_disk_key right_key
;
2018 btrfs_node_key(right
, &right_key
, 0);
2019 tree_mod_log_set_node_key(root
->fs_info
, parent
,
2021 btrfs_set_node_key(parent
, &right_key
, pslot
+ 1);
2022 btrfs_mark_buffer_dirty(parent
);
2025 if (btrfs_header_nritems(mid
) == 1) {
2027 * we're not allowed to leave a node with one item in the
2028 * tree during a delete. A deletion from lower in the tree
2029 * could try to delete the only pointer in this node.
2030 * So, pull some keys from the left.
2031 * There has to be a left pointer at this point because
2032 * otherwise we would have pulled some pointers from the
2037 btrfs_std_error(root
->fs_info
, ret
);
2040 wret
= balance_node_right(trans
, root
, mid
, left
);
2046 wret
= push_node_left(trans
, root
, left
, mid
, 1);
2052 if (btrfs_header_nritems(mid
) == 0) {
2053 clean_tree_block(trans
, root
, mid
);
2054 btrfs_tree_unlock(mid
);
2055 del_ptr(root
, path
, level
+ 1, pslot
);
2056 root_sub_used(root
, mid
->len
);
2057 btrfs_free_tree_block(trans
, root
, mid
, 0, 1);
2058 free_extent_buffer_stale(mid
);
2061 /* update the parent key to reflect our changes */
2062 struct btrfs_disk_key mid_key
;
2063 btrfs_node_key(mid
, &mid_key
, 0);
2064 tree_mod_log_set_node_key(root
->fs_info
, parent
,
2066 btrfs_set_node_key(parent
, &mid_key
, pslot
);
2067 btrfs_mark_buffer_dirty(parent
);
2070 /* update the path */
2072 if (btrfs_header_nritems(left
) > orig_slot
) {
2073 extent_buffer_get(left
);
2074 /* left was locked after cow */
2075 path
->nodes
[level
] = left
;
2076 path
->slots
[level
+ 1] -= 1;
2077 path
->slots
[level
] = orig_slot
;
2079 btrfs_tree_unlock(mid
);
2080 free_extent_buffer(mid
);
2083 orig_slot
-= btrfs_header_nritems(left
);
2084 path
->slots
[level
] = orig_slot
;
2087 /* double check we haven't messed things up */
2089 btrfs_node_blockptr(path
->nodes
[level
], path
->slots
[level
]))
2093 btrfs_tree_unlock(right
);
2094 free_extent_buffer(right
);
2097 if (path
->nodes
[level
] != left
)
2098 btrfs_tree_unlock(left
);
2099 free_extent_buffer(left
);
2104 /* Node balancing for insertion. Here we only split or push nodes around
2105 * when they are completely full. This is also done top down, so we
2106 * have to be pessimistic.
2108 static noinline
int push_nodes_for_insert(struct btrfs_trans_handle
*trans
,
2109 struct btrfs_root
*root
,
2110 struct btrfs_path
*path
, int level
)
2112 struct extent_buffer
*right
= NULL
;
2113 struct extent_buffer
*mid
;
2114 struct extent_buffer
*left
= NULL
;
2115 struct extent_buffer
*parent
= NULL
;
2119 int orig_slot
= path
->slots
[level
];
2124 mid
= path
->nodes
[level
];
2125 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
2127 if (level
< BTRFS_MAX_LEVEL
- 1) {
2128 parent
= path
->nodes
[level
+ 1];
2129 pslot
= path
->slots
[level
+ 1];
2135 left
= read_node_slot(root
, parent
, pslot
- 1);
2137 /* first, try to make some room in the middle buffer */
2141 btrfs_tree_lock(left
);
2142 btrfs_set_lock_blocking(left
);
2144 left_nr
= btrfs_header_nritems(left
);
2145 if (left_nr
>= BTRFS_NODEPTRS_PER_BLOCK(root
) - 1) {
2148 ret
= btrfs_cow_block(trans
, root
, left
, parent
,
2153 wret
= push_node_left(trans
, root
,
2160 struct btrfs_disk_key disk_key
;
2161 orig_slot
+= left_nr
;
2162 btrfs_node_key(mid
, &disk_key
, 0);
2163 tree_mod_log_set_node_key(root
->fs_info
, parent
,
2165 btrfs_set_node_key(parent
, &disk_key
, pslot
);
2166 btrfs_mark_buffer_dirty(parent
);
2167 if (btrfs_header_nritems(left
) > orig_slot
) {
2168 path
->nodes
[level
] = left
;
2169 path
->slots
[level
+ 1] -= 1;
2170 path
->slots
[level
] = orig_slot
;
2171 btrfs_tree_unlock(mid
);
2172 free_extent_buffer(mid
);
2175 btrfs_header_nritems(left
);
2176 path
->slots
[level
] = orig_slot
;
2177 btrfs_tree_unlock(left
);
2178 free_extent_buffer(left
);
2182 btrfs_tree_unlock(left
);
2183 free_extent_buffer(left
);
2185 right
= read_node_slot(root
, parent
, pslot
+ 1);
2188 * then try to empty the right most buffer into the middle
2193 btrfs_tree_lock(right
);
2194 btrfs_set_lock_blocking(right
);
2196 right_nr
= btrfs_header_nritems(right
);
2197 if (right_nr
>= BTRFS_NODEPTRS_PER_BLOCK(root
) - 1) {
2200 ret
= btrfs_cow_block(trans
, root
, right
,
2206 wret
= balance_node_right(trans
, root
,
2213 struct btrfs_disk_key disk_key
;
2215 btrfs_node_key(right
, &disk_key
, 0);
2216 tree_mod_log_set_node_key(root
->fs_info
, parent
,
2218 btrfs_set_node_key(parent
, &disk_key
, pslot
+ 1);
2219 btrfs_mark_buffer_dirty(parent
);
2221 if (btrfs_header_nritems(mid
) <= orig_slot
) {
2222 path
->nodes
[level
] = right
;
2223 path
->slots
[level
+ 1] += 1;
2224 path
->slots
[level
] = orig_slot
-
2225 btrfs_header_nritems(mid
);
2226 btrfs_tree_unlock(mid
);
2227 free_extent_buffer(mid
);
2229 btrfs_tree_unlock(right
);
2230 free_extent_buffer(right
);
2234 btrfs_tree_unlock(right
);
2235 free_extent_buffer(right
);
2241 * readahead one full node of leaves, finding things that are close
2242 * to the block in 'slot', and triggering ra on them.
2244 static void reada_for_search(struct btrfs_root
*root
,
2245 struct btrfs_path
*path
,
2246 int level
, int slot
, u64 objectid
)
2248 struct extent_buffer
*node
;
2249 struct btrfs_disk_key disk_key
;
2255 int direction
= path
->reada
;
2256 struct extent_buffer
*eb
;
2264 if (!path
->nodes
[level
])
2267 node
= path
->nodes
[level
];
2269 search
= btrfs_node_blockptr(node
, slot
);
2270 blocksize
= root
->nodesize
;
2271 eb
= btrfs_find_tree_block(root
, search
, blocksize
);
2273 free_extent_buffer(eb
);
2279 nritems
= btrfs_header_nritems(node
);
2283 if (direction
< 0) {
2287 } else if (direction
> 0) {
2292 if (path
->reada
< 0 && objectid
) {
2293 btrfs_node_key(node
, &disk_key
, nr
);
2294 if (btrfs_disk_key_objectid(&disk_key
) != objectid
)
2297 search
= btrfs_node_blockptr(node
, nr
);
2298 if ((search
<= target
&& target
- search
<= 65536) ||
2299 (search
> target
&& search
- target
<= 65536)) {
2300 gen
= btrfs_node_ptr_generation(node
, nr
);
2301 readahead_tree_block(root
, search
, blocksize
, gen
);
2305 if ((nread
> 65536 || nscan
> 32))
2310 static noinline
void reada_for_balance(struct btrfs_root
*root
,
2311 struct btrfs_path
*path
, int level
)
2315 struct extent_buffer
*parent
;
2316 struct extent_buffer
*eb
;
2322 parent
= path
->nodes
[level
+ 1];
2326 nritems
= btrfs_header_nritems(parent
);
2327 slot
= path
->slots
[level
+ 1];
2328 blocksize
= root
->nodesize
;
2331 block1
= btrfs_node_blockptr(parent
, slot
- 1);
2332 gen
= btrfs_node_ptr_generation(parent
, slot
- 1);
2333 eb
= btrfs_find_tree_block(root
, block1
, blocksize
);
2335 * if we get -eagain from btrfs_buffer_uptodate, we
2336 * don't want to return eagain here. That will loop
2339 if (eb
&& btrfs_buffer_uptodate(eb
, gen
, 1) != 0)
2341 free_extent_buffer(eb
);
2343 if (slot
+ 1 < nritems
) {
2344 block2
= btrfs_node_blockptr(parent
, slot
+ 1);
2345 gen
= btrfs_node_ptr_generation(parent
, slot
+ 1);
2346 eb
= btrfs_find_tree_block(root
, block2
, blocksize
);
2347 if (eb
&& btrfs_buffer_uptodate(eb
, gen
, 1) != 0)
2349 free_extent_buffer(eb
);
2353 readahead_tree_block(root
, block1
, blocksize
, 0);
2355 readahead_tree_block(root
, block2
, blocksize
, 0);
2360 * when we walk down the tree, it is usually safe to unlock the higher layers
2361 * in the tree. The exceptions are when our path goes through slot 0, because
2362 * operations on the tree might require changing key pointers higher up in the
2365 * callers might also have set path->keep_locks, which tells this code to keep
2366 * the lock if the path points to the last slot in the block. This is part of
2367 * walking through the tree, and selecting the next slot in the higher block.
2369 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2370 * if lowest_unlock is 1, level 0 won't be unlocked
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node. This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
	int i;

	if (path->keep_locks)
		return;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			continue;
		if (!path->locks[i])
			continue;
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
		path->locks[i] = 0;
	}
}
/*
 * helper function for btrfs_search_slot. The goal is to find a block
 * in cache without setting the path to blocking. If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada. -EAGAIN is returned and the search must be repeated.
 */
static int
read_block_for_search(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      struct btrfs_key *key, u64 time_seq)
{
	u64 blocknr;
	u64 gen;
	u32 blocksize;
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;
	int ret;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);
	blocksize = root->nodesize;

	tmp = btrfs_find_tree_block(root, blocknr, blocksize);
	if (tmp) {
		/* first we do an atomic uptodate check */
		if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
			*eb_ret = tmp;
			return 0;
		}

		/* the pages were up to date, but we failed
		 * the generation number check. Do a full
		 * read for the generation number that is correct.
		 * We must do this without dropping locks so
		 * we can trust our generation number
		 */
		btrfs_set_path_blocking(p);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_buffer(tmp, gen);
		if (!ret) {
			*eb_ret = tmp;
			return 0;
		}
		free_extent_buffer(tmp);
		btrfs_release_path(p);
		return -EIO;
	}

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read. Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	ret = -EAGAIN;
	tmp = read_tree_block(root, blocknr, blocksize, 0);
	if (tmp) {
		/*
		 * If the read above didn't mark this buffer up to date,
		 * it will never end up being up to date. Set ret to EIO now
		 * and give up so that our caller doesn't loop forever
		 */
		if (!btrfs_buffer_uptodate(tmp, 0, 0))
			ret = -EIO;
		free_extent_buffer(tmp);
	}
	return ret;
}
/*
 * helper function for btrfs_search_slot. This does all of the checks
 * for node-level blocks and does any balancing required based on
 * the ins_len.
 *
 * If no extra work was required, zero is returned. If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 * loop back to the beginning and try again.
 */
2535 setup_nodes_for_search(struct btrfs_trans_handle
*trans
,
2536 struct btrfs_root
*root
, struct btrfs_path
*p
,
2537 struct extent_buffer
*b
, int level
, int ins_len
,
2538 int *write_lock_level
)
2541 if ((p
->search_for_split
|| ins_len
> 0) && btrfs_header_nritems(b
) >=
2542 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3) {
2545 if (*write_lock_level
< level
+ 1) {
2546 *write_lock_level
= level
+ 1;
2547 btrfs_release_path(p
);
2551 btrfs_set_path_blocking(p
);
2552 reada_for_balance(root
, p
, level
);
2553 sret
= split_node(trans
, root
, p
, level
);
2554 btrfs_clear_path_blocking(p
, NULL
, 0);
2561 b
= p
->nodes
[level
];
2562 } else if (ins_len
< 0 && btrfs_header_nritems(b
) <
2563 BTRFS_NODEPTRS_PER_BLOCK(root
) / 2) {
2566 if (*write_lock_level
< level
+ 1) {
2567 *write_lock_level
= level
+ 1;
2568 btrfs_release_path(p
);
2572 btrfs_set_path_blocking(p
);
2573 reada_for_balance(root
, p
, level
);
2574 sret
= balance_level(trans
, root
, p
, level
);
2575 btrfs_clear_path_blocking(p
, NULL
, 0);
2581 b
= p
->nodes
[level
];
2583 btrfs_release_path(p
);
2586 BUG_ON(btrfs_header_nritems(b
) == 1);
static void key_search_validate(struct extent_buffer *b,
				struct btrfs_key *key,
				int level)
{
#ifdef CONFIG_BTRFS_ASSERT
	struct btrfs_disk_key disk_key;

	btrfs_cpu_key_to_disk(&disk_key, key);

	if (level == 0)
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_leaf, items[0].key),
		    sizeof(disk_key)));
	else
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
		    offsetof(struct btrfs_node, ptrs[0].key),
		    sizeof(disk_key)));
#endif
}

static int key_search(struct extent_buffer *b, struct btrfs_key *key,
		      int level, int *prev_cmp, int *slot)
{
	if (*prev_cmp != 0) {
		*prev_cmp = bin_search(b, key, level, slot);
		return *prev_cmp;
	}

	key_search_validate(b, key, level);
	*slot = 0;

	return 0;
}
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
		u64 iobjectid, u64 ioff, u8 key_type,
		struct btrfs_key *found_key)
{
	int ret;
	struct btrfs_key key;
	struct extent_buffer *eb;
	struct btrfs_path *path;

	key.type = key_type;
	key.objectid = iobjectid;
	key.offset = ioff;

	if (found_path == NULL) {
		path = btrfs_alloc_path();
		if (!path)
			return -ENOMEM;
	} else
		path = found_path;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
	if ((ret < 0) || (found_key == NULL)) {
		if (path != found_path)
			btrfs_free_path(path);
		return ret;
	}

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		if (ret)
			return ret;
		eb = path->nodes[0];
	}

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
		return 1;

	return 0;
}
/*
 * look for key in the tree. path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned. If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
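/*
 * Typical read-only caller pattern (illustrative sketch; the struct name
 * "btrfs_whatever_item" is a placeholder, not a real type):
 *
 *	struct btrfs_path *path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
 *				      struct btrfs_whatever_item);
 *		(read fields out of 'item' while the leaf is locked)
 *	}
 *	btrfs_free_path(path);
 *
 * Passing trans == NULL with ins_len == 0 and cow == 0 is the read-only
 * case; callers that intend to modify the leaf pass a transaction handle
 * and a positive ins_len so blocks are COWed and split on the way down.
 */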
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
{
	struct extent_buffer *b;
2695 int lowest_unlock
= 1;
2697 /* everything at write_lock_level or lower must be write locked */
2698 int write_lock_level
= 0;
2699 u8 lowest_level
= 0;
2700 int min_write_lock_level
;
2703 lowest_level
= p
->lowest_level
;
2704 WARN_ON(lowest_level
&& ins_len
> 0);
2705 WARN_ON(p
->nodes
[0] != NULL
);
2706 BUG_ON(!cow
&& ins_len
);
2711 /* when we are removing items, we might have to go up to level
2712 * two as we update tree pointers Make sure we keep write
2713 * for those levels as well
2715 write_lock_level
= 2;
2716 } else if (ins_len
> 0) {
2718 * for inserting items, make sure we have a write lock on
2719 * level 1 so we can update keys
2721 write_lock_level
= 1;
2725 write_lock_level
= -1;
2727 if (cow
&& (p
->keep_locks
|| p
->lowest_level
))
2728 write_lock_level
= BTRFS_MAX_LEVEL
;
2730 min_write_lock_level
= write_lock_level
;
2735 * we try very hard to do read locks on the root
2737 root_lock
= BTRFS_READ_LOCK
;
2739 if (p
->search_commit_root
) {
2741 * the commit roots are read only
2742 * so we always do read locks
2744 if (p
->need_commit_sem
)
2745 down_read(&root
->fs_info
->commit_root_sem
);
2746 b
= root
->commit_root
;
2747 extent_buffer_get(b
);
2748 level
= btrfs_header_level(b
);
2749 if (p
->need_commit_sem
)
2750 up_read(&root
->fs_info
->commit_root_sem
);
2751 if (!p
->skip_locking
)
2752 btrfs_tree_read_lock(b
);
2754 if (p
->skip_locking
) {
2755 b
= btrfs_root_node(root
);
2756 level
= btrfs_header_level(b
);
2758 /* we don't know the level of the root node
2759 * until we actually have it read locked
2761 b
= btrfs_read_lock_root_node(root
);
2762 level
= btrfs_header_level(b
);
2763 if (level
<= write_lock_level
) {
2764 /* whoops, must trade for write lock */
2765 btrfs_tree_read_unlock(b
);
2766 free_extent_buffer(b
);
2767 b
= btrfs_lock_root_node(root
);
2768 root_lock
= BTRFS_WRITE_LOCK
;
2770 /* the level might have changed, check again */
2771 level
= btrfs_header_level(b
);
2775 p
->nodes
[level
] = b
;
2776 if (!p
->skip_locking
)
2777 p
->locks
[level
] = root_lock
;
2780 level
= btrfs_header_level(b
);
2783 * setup the path here so we can release it under lock
2784 * contention with the cow code
2788 * if we don't really need to cow this block
2789 * then we don't want to set the path blocking,
2790 * so we test it here
2792 if (!should_cow_block(trans
, root
, b
))
2795 btrfs_set_path_blocking(p
);
2798 * must have write locks on this node and the
2801 if (level
> write_lock_level
||
2802 (level
+ 1 > write_lock_level
&&
2803 level
+ 1 < BTRFS_MAX_LEVEL
&&
2804 p
->nodes
[level
+ 1])) {
2805 write_lock_level
= level
+ 1;
2806 btrfs_release_path(p
);
2810 err
= btrfs_cow_block(trans
, root
, b
,
2811 p
->nodes
[level
+ 1],
2812 p
->slots
[level
+ 1], &b
);
2819 p
->nodes
[level
] = b
;
2820 btrfs_clear_path_blocking(p
, NULL
, 0);
2823 * we have a lock on b and as long as we aren't changing
2824 * the tree, there is no way to for the items in b to change.
2825 * It is safe to drop the lock on our parent before we
2826 * go through the expensive btree search on b.
2828 * If we're inserting or deleting (ins_len != 0), then we might
2829 * be changing slot zero, which may require changing the parent.
2830 * So, we can't drop the lock until after we know which slot
2831 * we're operating on.
2833 if (!ins_len
&& !p
->keep_locks
) {
2836 if (u
< BTRFS_MAX_LEVEL
&& p
->locks
[u
]) {
2837 btrfs_tree_unlock_rw(p
->nodes
[u
], p
->locks
[u
]);
2842 ret
= key_search(b
, key
, level
, &prev_cmp
, &slot
);
2846 if (ret
&& slot
> 0) {
2850 p
->slots
[level
] = slot
;
2851 err
= setup_nodes_for_search(trans
, root
, p
, b
, level
,
2852 ins_len
, &write_lock_level
);
2859 b
= p
->nodes
[level
];
2860 slot
= p
->slots
[level
];
2863 * slot 0 is special, if we change the key
2864 * we have to update the parent pointer
2865 * which means we must have a write lock
2868 if (slot
== 0 && ins_len
&&
2869 write_lock_level
< level
+ 1) {
2870 write_lock_level
= level
+ 1;
2871 btrfs_release_path(p
);
2875 unlock_up(p
, level
, lowest_unlock
,
2876 min_write_lock_level
, &write_lock_level
);
2878 if (level
== lowest_level
) {
2884 err
= read_block_for_search(trans
, root
, p
,
2885 &b
, level
, slot
, key
, 0);
2893 if (!p
->skip_locking
) {
2894 level
= btrfs_header_level(b
);
2895 if (level
<= write_lock_level
) {
2896 err
= btrfs_try_tree_write_lock(b
);
2898 btrfs_set_path_blocking(p
);
2900 btrfs_clear_path_blocking(p
, b
,
2903 p
->locks
[level
] = BTRFS_WRITE_LOCK
;
2905 err
= btrfs_try_tree_read_lock(b
);
2907 btrfs_set_path_blocking(p
);
2908 btrfs_tree_read_lock(b
);
2909 btrfs_clear_path_blocking(p
, b
,
2912 p
->locks
[level
] = BTRFS_READ_LOCK
;
2914 p
->nodes
[level
] = b
;
2917 p
->slots
[level
] = slot
;
2919 btrfs_leaf_free_space(root
, b
) < ins_len
) {
2920 if (write_lock_level
< 1) {
2921 write_lock_level
= 1;
2922 btrfs_release_path(p
);
2926 btrfs_set_path_blocking(p
);
2927 err
= split_leaf(trans
, root
, key
,
2928 p
, ins_len
, ret
== 0);
2929 btrfs_clear_path_blocking(p
, NULL
, 0);
2937 if (!p
->search_for_split
)
2938 unlock_up(p
, level
, lowest_unlock
,
2939 min_write_lock_level
, &write_lock_level
);
2946 * we don't really know what they plan on doing with the path
2947 * from here on, so for now just mark it as blocking
2949 if (!p
->leave_spinning
)
2950 btrfs_set_path_blocking(p
);
2952 btrfs_release_path(p
);
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
2967 int btrfs_search_old_slot(struct btrfs_root
*root
, struct btrfs_key
*key
,
2968 struct btrfs_path
*p
, u64 time_seq
)
2970 struct extent_buffer
*b
;
2975 int lowest_unlock
= 1;
2976 u8 lowest_level
= 0;
2979 lowest_level
= p
->lowest_level
;
2980 WARN_ON(p
->nodes
[0] != NULL
);
2982 if (p
->search_commit_root
) {
2984 return btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
2988 b
= get_old_root(root
, time_seq
);
2989 level
= btrfs_header_level(b
);
2990 p
->locks
[level
] = BTRFS_READ_LOCK
;
2993 level
= btrfs_header_level(b
);
2994 p
->nodes
[level
] = b
;
2995 btrfs_clear_path_blocking(p
, NULL
, 0);
2998 * we have a lock on b and as long as we aren't changing
2999 * the tree, there is no way to for the items in b to change.
3000 * It is safe to drop the lock on our parent before we
3001 * go through the expensive btree search on b.
3003 btrfs_unlock_up_safe(p
, level
+ 1);
3006 * Since we can unwind eb's we want to do a real search every
3010 ret
= key_search(b
, key
, level
, &prev_cmp
, &slot
);
3014 if (ret
&& slot
> 0) {
3018 p
->slots
[level
] = slot
;
3019 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
3021 if (level
== lowest_level
) {
3027 err
= read_block_for_search(NULL
, root
, p
, &b
, level
,
3028 slot
, key
, time_seq
);
3036 level
= btrfs_header_level(b
);
3037 err
= btrfs_try_tree_read_lock(b
);
3039 btrfs_set_path_blocking(p
);
3040 btrfs_tree_read_lock(b
);
3041 btrfs_clear_path_blocking(p
, b
,
3044 b
= tree_mod_log_rewind(root
->fs_info
, p
, b
, time_seq
);
3049 p
->locks
[level
] = BTRFS_READ_LOCK
;
3050 p
->nodes
[level
] = b
;
3052 p
->slots
[level
] = slot
;
3053 unlock_up(p
, level
, lowest_unlock
, 0, NULL
);
3059 if (!p
->leave_spinning
)
3060 btrfs_set_path_blocking(p
);
3062 btrfs_release_path(p
);
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * otherwise.
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is
 * found, return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 * < 0 on error.
 */
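/*
 * Illustrative use (a sketch, not a caller that exists in this file): to
 * position a cursor at the first item at or after 'key', falling back to
 * the last lower item when nothing higher exists:
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret < 0)
 *		goto out;
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found,
 *				      path->slots[0]);
 */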
3079 int btrfs_search_slot_for_read(struct btrfs_root
*root
,
3080 struct btrfs_key
*key
, struct btrfs_path
*p
,
3081 int find_higher
, int return_any
)
3084 struct extent_buffer
*leaf
;
3087 ret
= btrfs_search_slot(NULL
, root
, key
, p
, 0, 0);
3091 * a return value of 1 means the path is at the position where the
3092 * item should be inserted. Normally this is the next bigger item,
3093 * but in case the previous item is the last in a leaf, path points
3094 * to the first free slot in the previous leaf, i.e. at an invalid
3100 if (p
->slots
[0] >= btrfs_header_nritems(leaf
)) {
3101 ret
= btrfs_next_leaf(root
, p
);
3107 * no higher item found, return the next
3112 btrfs_release_path(p
);
3116 if (p
->slots
[0] == 0) {
3117 ret
= btrfs_prev_leaf(root
, p
);
3122 if (p
->slots
[0] == btrfs_header_nritems(leaf
))
3129 * no lower item found, return the next
3134 btrfs_release_path(p
);
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 */
static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
{
	int i;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
			break;
		t = path->nodes[i];
		tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
		if (tslot != 0)
			break;
	}
}
/*
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *new_key)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;
	int slot;

	eb = path->nodes[0];
	slot = path->slots[0];
	if (slot > 0) {
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	}
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);
	}

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
	if (slot == 0)
		fixup_low_keys(root, path, &disk_key, 1);
}
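/*
 * The two BUG_ON()s above are the "won't break the order" check from the
 * comment: the new key must still sort strictly after the item in the
 * previous slot and strictly before the item in the next slot, otherwise
 * rewriting the key in place would corrupt the leaf ordering.
 */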
/*
 * try to push data from one node into the next node left in the
 * tree.
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
{
	int push_items = 0;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
		return 1;

	if (push_items <= 0)
		return 1;

	if (empty) {
		push_items = min(src_nritems, push_items);
		if (push_items < src_nritems) {
			/* leave at least 8 pointers in the node if
			 * we aren't going to empty it
			 */
			if (src_nritems - push_items < 8) {
				if (push_items <= 8)
					return 1;
				push_items -= 8;
			}
		}
	} else
		push_items = min(src_nritems - 8, push_items);

	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
				   push_items);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		/*
		 * don't call tree_mod_log_eb_move here, key removal was already
		 * fully logged by tree_mod_log_eb_copy above.
		 */
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));
	}
	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
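/*
 * Worked example for the sizing logic above: if the destination node has
 * room for 16 more key pointers but the source holds only 20 and 'empty'
 * is not set, push_items is clamped to src_nritems - 8 = 12, so the source
 * keeps at least 8 pointers and does not immediately need to be refilled
 * by the balancing code.
 */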
/*
 * try to push data from one node into the next node right in the
 * tree.
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
{
	int push_items = 0;
	int max_push;
	int src_nritems;
	int dst_nritems;
	int ret = 0;

	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
		return 1;

	if (src_nritems < 4)
		return 1;

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
		return 1;

	if (max_push < push_items)
		push_items = max_push;

	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      (dst_nritems) *
			      sizeof(struct btrfs_key_ptr));

	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
				   src_nritems - push_items, push_items);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}
	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);

	return ret;
}
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
3346 static noinline
int insert_new_root(struct btrfs_trans_handle
*trans
,
3347 struct btrfs_root
*root
,
3348 struct btrfs_path
*path
, int level
)
3351 struct extent_buffer
*lower
;
3352 struct extent_buffer
*c
;
3353 struct extent_buffer
*old
;
3354 struct btrfs_disk_key lower_key
;
3356 BUG_ON(path
->nodes
[level
]);
3357 BUG_ON(path
->nodes
[level
-1] != root
->node
);
3359 lower
= path
->nodes
[level
-1];
3361 btrfs_item_key(lower
, &lower_key
, 0);
3363 btrfs_node_key(lower
, &lower_key
, 0);
3365 c
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
3366 root
->root_key
.objectid
, &lower_key
,
3367 level
, root
->node
->start
, 0);
3371 root_add_used(root
, root
->nodesize
);
3373 memset_extent_buffer(c
, 0, 0, sizeof(struct btrfs_header
));
3374 btrfs_set_header_nritems(c
, 1);
3375 btrfs_set_header_level(c
, level
);
3376 btrfs_set_header_bytenr(c
, c
->start
);
3377 btrfs_set_header_generation(c
, trans
->transid
);
3378 btrfs_set_header_backref_rev(c
, BTRFS_MIXED_BACKREF_REV
);
3379 btrfs_set_header_owner(c
, root
->root_key
.objectid
);
3381 write_extent_buffer(c
, root
->fs_info
->fsid
, btrfs_header_fsid(),
3384 write_extent_buffer(c
, root
->fs_info
->chunk_tree_uuid
,
3385 btrfs_header_chunk_tree_uuid(c
), BTRFS_UUID_SIZE
);
3387 btrfs_set_node_key(c
, &lower_key
, 0);
3388 btrfs_set_node_blockptr(c
, 0, lower
->start
);
3389 lower_gen
= btrfs_header_generation(lower
);
3390 WARN_ON(lower_gen
!= trans
->transid
);
3392 btrfs_set_node_ptr_generation(c
, 0, lower_gen
);
3394 btrfs_mark_buffer_dirty(c
);
3397 tree_mod_log_set_root_pointer(root
, c
, 0);
3398 rcu_assign_pointer(root
->node
, c
);
3400 /* the super has an extra ref to root->node */
3401 free_extent_buffer(old
);
3403 add_root_to_dirty_list(root
);
3404 extent_buffer_get(c
);
3405 path
->nodes
[level
] = c
;
3406 path
->locks
[level
] = BTRFS_WRITE_LOCK
;
3407 path
->slots
[level
] = 0;
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
{
	struct extent_buffer *lower;
	int nritems;
	int ret;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
		if (level)
			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
					     slot, nritems - slot);
		memmove_extent_buffer(lower,
			      btrfs_node_key_ptr_offset(slot + 1),
			      btrfs_node_key_ptr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_key_ptr));
	}
	if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
					      MOD_LOG_KEY_ADD, GFP_NOFS);
		BUG_ON(ret < 0);
	}
	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
}
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
3464 static noinline
int split_node(struct btrfs_trans_handle
*trans
,
3465 struct btrfs_root
*root
,
3466 struct btrfs_path
*path
, int level
)
3468 struct extent_buffer
*c
;
3469 struct extent_buffer
*split
;
3470 struct btrfs_disk_key disk_key
;
3475 c
= path
->nodes
[level
];
3476 WARN_ON(btrfs_header_generation(c
) != trans
->transid
);
3477 if (c
== root
->node
) {
3479 * trying to split the root, lets make a new one
3481 * tree mod log: We don't log_removal old root in
3482 * insert_new_root, because that root buffer will be kept as a
3483 * normal node. We are going to log removal of half of the
3484 * elements below with tree_mod_log_eb_copy. We're holding a
3485 * tree lock on the buffer, which is why we cannot race with
3486 * other tree_mod_log users.
3488 ret
= insert_new_root(trans
, root
, path
, level
+ 1);
3492 ret
= push_nodes_for_insert(trans
, root
, path
, level
);
3493 c
= path
->nodes
[level
];
3494 if (!ret
&& btrfs_header_nritems(c
) <
3495 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3)
3501 c_nritems
= btrfs_header_nritems(c
);
3502 mid
= (c_nritems
+ 1) / 2;
3503 btrfs_node_key(c
, &disk_key
, mid
);
3505 split
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
3506 root
->root_key
.objectid
,
3507 &disk_key
, level
, c
->start
, 0);
3509 return PTR_ERR(split
);
3511 root_add_used(root
, root
->nodesize
);
3513 memset_extent_buffer(split
, 0, 0, sizeof(struct btrfs_header
));
3514 btrfs_set_header_level(split
, btrfs_header_level(c
));
3515 btrfs_set_header_bytenr(split
, split
->start
);
3516 btrfs_set_header_generation(split
, trans
->transid
);
3517 btrfs_set_header_backref_rev(split
, BTRFS_MIXED_BACKREF_REV
);
3518 btrfs_set_header_owner(split
, root
->root_key
.objectid
);
3519 write_extent_buffer(split
, root
->fs_info
->fsid
,
3520 btrfs_header_fsid(), BTRFS_FSID_SIZE
);
3521 write_extent_buffer(split
, root
->fs_info
->chunk_tree_uuid
,
3522 btrfs_header_chunk_tree_uuid(split
),
3525 ret
= tree_mod_log_eb_copy(root
->fs_info
, split
, c
, 0,
3526 mid
, c_nritems
- mid
);
3528 btrfs_abort_transaction(trans
, root
, ret
);
3531 copy_extent_buffer(split
, c
,
3532 btrfs_node_key_ptr_offset(0),
3533 btrfs_node_key_ptr_offset(mid
),
3534 (c_nritems
- mid
) * sizeof(struct btrfs_key_ptr
));
3535 btrfs_set_header_nritems(split
, c_nritems
- mid
);
3536 btrfs_set_header_nritems(c
, mid
);
3539 btrfs_mark_buffer_dirty(c
);
3540 btrfs_mark_buffer_dirty(split
);
3542 insert_ptr(trans
, root
, path
, &disk_key
, split
->start
,
3543 path
->slots
[level
+ 1] + 1, level
+ 1);
3545 if (path
->slots
[level
] >= mid
) {
3546 path
->slots
[level
] -= mid
;
3547 btrfs_tree_unlock(c
);
3548 free_extent_buffer(c
);
3549 path
->nodes
[level
] = split
;
3550 path
->slots
[level
+ 1] += 1;
3552 btrfs_tree_unlock(split
);
3553 free_extent_buffer(split
);
/*
 * how many bytes are required to store the items in a leaf. start
 * and nr indicate which items in the leaf to check. This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	struct btrfs_map_token token;
	int data_len;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	if (!nr)
		return 0;
	btrfs_init_map_token(&token);
	start_item = btrfs_item_nr(start);
	end_item = btrfs_item_nr(end);
	data_len = btrfs_token_item_offset(l, start_item, &token) +
		btrfs_token_item_size(l, start_item, &token);
	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
	return data_len;
}
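/*
 * Leaf space accounting in one example: struct btrfs_item headers grow
 * forward from the start of the leaf data area while the item payloads grow
 * backwards from the end, so for a leaf holding items 0..N-1 the bytes
 * consumed are N * sizeof(struct btrfs_item) plus the span from the lowest
 * payload offset to the end of the block; btrfs_leaf_free_space() below is
 * simply BTRFS_LEAF_DATA_SIZE(root) minus that total.
 */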
/*
 * The space between the end of the leaf items and
 * the start of the leaf data. IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
{
	int nritems = btrfs_header_nritems(leaf);
	int ret;

	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
	if (ret < 0) {
		btrfs_crit(root->fs_info,
			"leaf free space ret %d, leaf data size %lu, used %d nritems %d",
			ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
			leaf_space_used(leaf, 0, nritems), nritems);
	}
	return ret;
}
/*
 * min slot controls the lowest index we're willing to push to the
 * right. We'll push up to and including min_slot, but no lower
 */
3609 static noinline
int __push_leaf_right(struct btrfs_trans_handle
*trans
,
3610 struct btrfs_root
*root
,
3611 struct btrfs_path
*path
,
3612 int data_size
, int empty
,
3613 struct extent_buffer
*right
,
3614 int free_space
, u32 left_nritems
,
3617 struct extent_buffer
*left
= path
->nodes
[0];
3618 struct extent_buffer
*upper
= path
->nodes
[1];
3619 struct btrfs_map_token token
;
3620 struct btrfs_disk_key disk_key
;
3625 struct btrfs_item
*item
;
3631 btrfs_init_map_token(&token
);
3636 nr
= max_t(u32
, 1, min_slot
);
3638 if (path
->slots
[0] >= left_nritems
)
3639 push_space
+= data_size
;
3641 slot
= path
->slots
[1];
3642 i
= left_nritems
- 1;
3644 item
= btrfs_item_nr(i
);
3646 if (!empty
&& push_items
> 0) {
3647 if (path
->slots
[0] > i
)
3649 if (path
->slots
[0] == i
) {
3650 int space
= btrfs_leaf_free_space(root
, left
);
3651 if (space
+ push_space
* 2 > free_space
)
3656 if (path
->slots
[0] == i
)
3657 push_space
+= data_size
;
3659 this_item_size
= btrfs_item_size(left
, item
);
3660 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3664 push_space
+= this_item_size
+ sizeof(*item
);
3670 if (push_items
== 0)
3673 WARN_ON(!empty
&& push_items
== left_nritems
);
3675 /* push left to right */
3676 right_nritems
= btrfs_header_nritems(right
);
3678 push_space
= btrfs_item_end_nr(left
, left_nritems
- push_items
);
3679 push_space
-= leaf_data_end(root
, left
);
3681 /* make room in the right data area */
3682 data_end
= leaf_data_end(root
, right
);
3683 memmove_extent_buffer(right
,
3684 btrfs_leaf_data(right
) + data_end
- push_space
,
3685 btrfs_leaf_data(right
) + data_end
,
3686 BTRFS_LEAF_DATA_SIZE(root
) - data_end
);
3688 /* copy from the left data area */
3689 copy_extent_buffer(right
, left
, btrfs_leaf_data(right
) +
3690 BTRFS_LEAF_DATA_SIZE(root
) - push_space
,
3691 btrfs_leaf_data(left
) + leaf_data_end(root
, left
),
3694 memmove_extent_buffer(right
, btrfs_item_nr_offset(push_items
),
3695 btrfs_item_nr_offset(0),
3696 right_nritems
* sizeof(struct btrfs_item
));
3698 /* copy the items from left to right */
3699 copy_extent_buffer(right
, left
, btrfs_item_nr_offset(0),
3700 btrfs_item_nr_offset(left_nritems
- push_items
),
3701 push_items
* sizeof(struct btrfs_item
));
3703 /* update the item pointers */
3704 right_nritems
+= push_items
;
3705 btrfs_set_header_nritems(right
, right_nritems
);
3706 push_space
= BTRFS_LEAF_DATA_SIZE(root
);
3707 for (i
= 0; i
< right_nritems
; i
++) {
3708 item
= btrfs_item_nr(i
);
3709 push_space
-= btrfs_token_item_size(right
, item
, &token
);
3710 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3713 left_nritems
-= push_items
;
3714 btrfs_set_header_nritems(left
, left_nritems
);
3717 btrfs_mark_buffer_dirty(left
);
3719 clean_tree_block(trans
, root
, left
);
3721 btrfs_mark_buffer_dirty(right
);
3723 btrfs_item_key(right
, &disk_key
, 0);
3724 btrfs_set_node_key(upper
, &disk_key
, slot
+ 1);
3725 btrfs_mark_buffer_dirty(upper
);
3727 /* then fixup the leaf pointer in the path */
3728 if (path
->slots
[0] >= left_nritems
) {
3729 path
->slots
[0] -= left_nritems
;
3730 if (btrfs_header_nritems(path
->nodes
[0]) == 0)
3731 clean_tree_block(trans
, root
, path
->nodes
[0]);
3732 btrfs_tree_unlock(path
->nodes
[0]);
3733 free_extent_buffer(path
->nodes
[0]);
3734 path
->nodes
[0] = right
;
3735 path
->slots
[1] += 1;
3737 btrfs_tree_unlock(right
);
3738 free_extent_buffer(right
);
3743 btrfs_tree_unlock(right
);
3744 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf. It won't
 * push any slot lower than min_slot
 */
3758 static int push_leaf_right(struct btrfs_trans_handle
*trans
, struct btrfs_root
3759 *root
, struct btrfs_path
*path
,
3760 int min_data_size
, int data_size
,
3761 int empty
, u32 min_slot
)
3763 struct extent_buffer
*left
= path
->nodes
[0];
3764 struct extent_buffer
*right
;
3765 struct extent_buffer
*upper
;
3771 if (!path
->nodes
[1])
3774 slot
= path
->slots
[1];
3775 upper
= path
->nodes
[1];
3776 if (slot
>= btrfs_header_nritems(upper
) - 1)
3779 btrfs_assert_tree_locked(path
->nodes
[1]);
3781 right
= read_node_slot(root
, upper
, slot
+ 1);
3785 btrfs_tree_lock(right
);
3786 btrfs_set_lock_blocking(right
);
3788 free_space
= btrfs_leaf_free_space(root
, right
);
3789 if (free_space
< data_size
)
3792 /* cow and double check */
3793 ret
= btrfs_cow_block(trans
, root
, right
, upper
,
3798 free_space
= btrfs_leaf_free_space(root
, right
);
3799 if (free_space
< data_size
)
3802 left_nritems
= btrfs_header_nritems(left
);
3803 if (left_nritems
== 0)
3806 if (path
->slots
[0] == left_nritems
&& !empty
) {
3807 /* Key greater than all keys in the leaf, right neighbor has
3808 * enough room for it and we're not emptying our leaf to delete
3809 * it, therefore use right neighbor to insert the new item and
3810 * no need to touch/dirty our left leaft. */
3811 btrfs_tree_unlock(left
);
3812 free_extent_buffer(left
);
3813 path
->nodes
[0] = right
;
3819 return __push_leaf_right(trans
, root
, path
, min_data_size
, empty
,
3820 right
, free_space
, left_nritems
, min_slot
);
3822 btrfs_tree_unlock(right
);
3823 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
 * items
 */
3835 static noinline
int __push_leaf_left(struct btrfs_trans_handle
*trans
,
3836 struct btrfs_root
*root
,
3837 struct btrfs_path
*path
, int data_size
,
3838 int empty
, struct extent_buffer
*left
,
3839 int free_space
, u32 right_nritems
,
3842 struct btrfs_disk_key disk_key
;
3843 struct extent_buffer
*right
= path
->nodes
[0];
3847 struct btrfs_item
*item
;
3848 u32 old_left_nritems
;
3852 u32 old_left_item_size
;
3853 struct btrfs_map_token token
;
3855 btrfs_init_map_token(&token
);
3858 nr
= min(right_nritems
, max_slot
);
3860 nr
= min(right_nritems
- 1, max_slot
);
3862 for (i
= 0; i
< nr
; i
++) {
3863 item
= btrfs_item_nr(i
);
3865 if (!empty
&& push_items
> 0) {
3866 if (path
->slots
[0] < i
)
3868 if (path
->slots
[0] == i
) {
3869 int space
= btrfs_leaf_free_space(root
, right
);
3870 if (space
+ push_space
* 2 > free_space
)
3875 if (path
->slots
[0] == i
)
3876 push_space
+= data_size
;
3878 this_item_size
= btrfs_item_size(right
, item
);
3879 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
3883 push_space
+= this_item_size
+ sizeof(*item
);
3886 if (push_items
== 0) {
3890 WARN_ON(!empty
&& push_items
== btrfs_header_nritems(right
));
3892 /* push data from right to left */
3893 copy_extent_buffer(left
, right
,
3894 btrfs_item_nr_offset(btrfs_header_nritems(left
)),
3895 btrfs_item_nr_offset(0),
3896 push_items
* sizeof(struct btrfs_item
));
3898 push_space
= BTRFS_LEAF_DATA_SIZE(root
) -
3899 btrfs_item_offset_nr(right
, push_items
- 1);
3901 copy_extent_buffer(left
, right
, btrfs_leaf_data(left
) +
3902 leaf_data_end(root
, left
) - push_space
,
3903 btrfs_leaf_data(right
) +
3904 btrfs_item_offset_nr(right
, push_items
- 1),
3906 old_left_nritems
= btrfs_header_nritems(left
);
3907 BUG_ON(old_left_nritems
<= 0);
3909 old_left_item_size
= btrfs_item_offset_nr(left
, old_left_nritems
- 1);
3910 for (i
= old_left_nritems
; i
< old_left_nritems
+ push_items
; i
++) {
3913 item
= btrfs_item_nr(i
);
3915 ioff
= btrfs_token_item_offset(left
, item
, &token
);
3916 btrfs_set_token_item_offset(left
, item
,
3917 ioff
- (BTRFS_LEAF_DATA_SIZE(root
) - old_left_item_size
),
3920 btrfs_set_header_nritems(left
, old_left_nritems
+ push_items
);
3922 /* fixup right node */
3923 if (push_items
> right_nritems
)
3924 WARN(1, KERN_CRIT
"push items %d nr %u\n", push_items
,
3927 if (push_items
< right_nritems
) {
3928 push_space
= btrfs_item_offset_nr(right
, push_items
- 1) -
3929 leaf_data_end(root
, right
);
3930 memmove_extent_buffer(right
, btrfs_leaf_data(right
) +
3931 BTRFS_LEAF_DATA_SIZE(root
) - push_space
,
3932 btrfs_leaf_data(right
) +
3933 leaf_data_end(root
, right
), push_space
);
3935 memmove_extent_buffer(right
, btrfs_item_nr_offset(0),
3936 btrfs_item_nr_offset(push_items
),
3937 (btrfs_header_nritems(right
) - push_items
) *
3938 sizeof(struct btrfs_item
));
3940 right_nritems
-= push_items
;
3941 btrfs_set_header_nritems(right
, right_nritems
);
3942 push_space
= BTRFS_LEAF_DATA_SIZE(root
);
3943 for (i
= 0; i
< right_nritems
; i
++) {
3944 item
= btrfs_item_nr(i
);
3946 push_space
= push_space
- btrfs_token_item_size(right
,
3948 btrfs_set_token_item_offset(right
, item
, push_space
, &token
);
3951 btrfs_mark_buffer_dirty(left
);
3953 btrfs_mark_buffer_dirty(right
);
3955 clean_tree_block(trans
, root
, right
);
3957 btrfs_item_key(right
, &disk_key
, 0);
3958 fixup_low_keys(root
, path
, &disk_key
, 1);
3960 /* then fixup the leaf pointer in the path */
3961 if (path
->slots
[0] < push_items
) {
3962 path
->slots
[0] += old_left_nritems
;
3963 btrfs_tree_unlock(path
->nodes
[0]);
3964 free_extent_buffer(path
->nodes
[0]);
3965 path
->nodes
[0] = left
;
3966 path
->slots
[1] -= 1;
3968 btrfs_tree_unlock(left
);
3969 free_extent_buffer(left
);
3970 path
->slots
[0] -= push_items
;
3972 BUG_ON(path
->slots
[0] < 0);
3975 btrfs_tree_unlock(left
);
3976 free_extent_buffer(left
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
 * items
 */
3988 static int push_leaf_left(struct btrfs_trans_handle
*trans
, struct btrfs_root
3989 *root
, struct btrfs_path
*path
, int min_data_size
,
3990 int data_size
, int empty
, u32 max_slot
)
3992 struct extent_buffer
*right
= path
->nodes
[0];
3993 struct extent_buffer
*left
;
3999 slot
= path
->slots
[1];
4002 if (!path
->nodes
[1])
4005 right_nritems
= btrfs_header_nritems(right
);
4006 if (right_nritems
== 0)
4009 btrfs_assert_tree_locked(path
->nodes
[1]);
4011 left
= read_node_slot(root
, path
->nodes
[1], slot
- 1);
4015 btrfs_tree_lock(left
);
4016 btrfs_set_lock_blocking(left
);
4018 free_space
= btrfs_leaf_free_space(root
, left
);
4019 if (free_space
< data_size
) {
4024 /* cow and double check */
4025 ret
= btrfs_cow_block(trans
, root
, left
,
4026 path
->nodes
[1], slot
- 1, &left
);
4028 /* we hit -ENOSPC, but it isn't fatal here */
4034 free_space
= btrfs_leaf_free_space(root
, left
);
4035 if (free_space
< data_size
) {
4040 return __push_leaf_left(trans
, root
, path
, min_data_size
,
4041 empty
, left
, free_space
, right_nritems
,
4044 btrfs_tree_unlock(left
);
4045 free_extent_buffer(left
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
4053 static noinline
void copy_for_split(struct btrfs_trans_handle
*trans
,
4054 struct btrfs_root
*root
,
4055 struct btrfs_path
*path
,
4056 struct extent_buffer
*l
,
4057 struct extent_buffer
*right
,
4058 int slot
, int mid
, int nritems
)
4063 struct btrfs_disk_key disk_key
;
4064 struct btrfs_map_token token
;
4066 btrfs_init_map_token(&token
);
4068 nritems
= nritems
- mid
;
4069 btrfs_set_header_nritems(right
, nritems
);
4070 data_copy_size
= btrfs_item_end_nr(l
, mid
) - leaf_data_end(root
, l
);
4072 copy_extent_buffer(right
, l
, btrfs_item_nr_offset(0),
4073 btrfs_item_nr_offset(mid
),
4074 nritems
* sizeof(struct btrfs_item
));
4076 copy_extent_buffer(right
, l
,
4077 btrfs_leaf_data(right
) + BTRFS_LEAF_DATA_SIZE(root
) -
4078 data_copy_size
, btrfs_leaf_data(l
) +
4079 leaf_data_end(root
, l
), data_copy_size
);
4081 rt_data_off
= BTRFS_LEAF_DATA_SIZE(root
) -
4082 btrfs_item_end_nr(l
, mid
);
4084 for (i
= 0; i
< nritems
; i
++) {
4085 struct btrfs_item
*item
= btrfs_item_nr(i
);
4088 ioff
= btrfs_token_item_offset(right
, item
, &token
);
4089 btrfs_set_token_item_offset(right
, item
,
4090 ioff
+ rt_data_off
, &token
);
4093 btrfs_set_header_nritems(l
, mid
);
4094 btrfs_item_key(right
, &disk_key
, 0);
4095 insert_ptr(trans
, root
, path
, &disk_key
, right
->start
,
4096 path
->slots
[1] + 1, 1);
4098 btrfs_mark_buffer_dirty(right
);
4099 btrfs_mark_buffer_dirty(l
);
4100 BUG_ON(path
->slots
[0] != slot
);
4103 btrfs_tree_unlock(path
->nodes
[0]);
4104 free_extent_buffer(path
->nodes
[0]);
4105 path
->nodes
[0] = right
;
4106 path
->slots
[0] -= mid
;
4107 path
->slots
[1] += 1;
4109 btrfs_tree_unlock(right
);
4110 free_extent_buffer(right
);
4113 BUG_ON(path
->slots
[0] < 0);
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf. A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves. If all goes well we can avoid the double split
 * completely.
 */
4126 static noinline
int push_for_double_split(struct btrfs_trans_handle
*trans
,
4127 struct btrfs_root
*root
,
4128 struct btrfs_path
*path
,
4135 int space_needed
= data_size
;
4137 slot
= path
->slots
[0];
4138 if (slot
< btrfs_header_nritems(path
->nodes
[0]))
4139 space_needed
-= btrfs_leaf_free_space(root
, path
->nodes
[0]);
4142 * try to push all the items after our slot into the
4145 ret
= push_leaf_right(trans
, root
, path
, 1, space_needed
, 0, slot
);
4152 nritems
= btrfs_header_nritems(path
->nodes
[0]);
4154 * our goal is to get our slot at the start or end of a leaf. If
4155 * we've done so we're done
4157 if (path
->slots
[0] == 0 || path
->slots
[0] == nritems
)
4160 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= data_size
)
4163 /* try to push all the items before our slot into the next leaf */
4164 slot
= path
->slots
[0];
4165 ret
= push_leaf_left(trans
, root
, path
, 1, space_needed
, 0, slot
);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
4183 static noinline
int split_leaf(struct btrfs_trans_handle
*trans
,
4184 struct btrfs_root
*root
,
4185 struct btrfs_key
*ins_key
,
4186 struct btrfs_path
*path
, int data_size
,
4189 struct btrfs_disk_key disk_key
;
4190 struct extent_buffer
*l
;
4194 struct extent_buffer
*right
;
4198 int num_doubles
= 0;
4199 int tried_avoid_double
= 0;
4202 slot
= path
->slots
[0];
4203 if (extend
&& data_size
+ btrfs_item_size_nr(l
, slot
) +
4204 sizeof(struct btrfs_item
) > BTRFS_LEAF_DATA_SIZE(root
))
4207 /* first try to make some room by pushing left and right */
4208 if (data_size
&& path
->nodes
[1]) {
4209 int space_needed
= data_size
;
4211 if (slot
< btrfs_header_nritems(l
))
4212 space_needed
-= btrfs_leaf_free_space(root
, l
);
4214 wret
= push_leaf_right(trans
, root
, path
, space_needed
,
4215 space_needed
, 0, 0);
4219 wret
= push_leaf_left(trans
, root
, path
, space_needed
,
4220 space_needed
, 0, (u32
)-1);
4226 /* did the pushes work? */
4227 if (btrfs_leaf_free_space(root
, l
) >= data_size
)
4231 if (!path
->nodes
[1]) {
4232 ret
= insert_new_root(trans
, root
, path
, 1);
4239 slot
= path
->slots
[0];
4240 nritems
= btrfs_header_nritems(l
);
4241 mid
= (nritems
+ 1) / 2;
4245 leaf_space_used(l
, mid
, nritems
- mid
) + data_size
>
4246 BTRFS_LEAF_DATA_SIZE(root
)) {
4247 if (slot
>= nritems
) {
4251 if (mid
!= nritems
&&
4252 leaf_space_used(l
, mid
, nritems
- mid
) +
4253 data_size
> BTRFS_LEAF_DATA_SIZE(root
)) {
4254 if (data_size
&& !tried_avoid_double
)
4255 goto push_for_double
;
4261 if (leaf_space_used(l
, 0, mid
) + data_size
>
4262 BTRFS_LEAF_DATA_SIZE(root
)) {
4263 if (!extend
&& data_size
&& slot
== 0) {
4265 } else if ((extend
|| !data_size
) && slot
== 0) {
4269 if (mid
!= nritems
&&
4270 leaf_space_used(l
, mid
, nritems
- mid
) +
4271 data_size
> BTRFS_LEAF_DATA_SIZE(root
)) {
4272 if (data_size
&& !tried_avoid_double
)
4273 goto push_for_double
;
4281 btrfs_cpu_key_to_disk(&disk_key
, ins_key
);
4283 btrfs_item_key(l
, &disk_key
, mid
);
4285 right
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
4286 root
->root_key
.objectid
,
4287 &disk_key
, 0, l
->start
, 0);
4289 return PTR_ERR(right
);
4291 root_add_used(root
, root
->nodesize
);
4293 memset_extent_buffer(right
, 0, 0, sizeof(struct btrfs_header
));
4294 btrfs_set_header_bytenr(right
, right
->start
);
4295 btrfs_set_header_generation(right
, trans
->transid
);
4296 btrfs_set_header_backref_rev(right
, BTRFS_MIXED_BACKREF_REV
);
4297 btrfs_set_header_owner(right
, root
->root_key
.objectid
);
4298 btrfs_set_header_level(right
, 0);
4299 write_extent_buffer(right
, root
->fs_info
->fsid
,
4300 btrfs_header_fsid(), BTRFS_FSID_SIZE
);
4302 write_extent_buffer(right
, root
->fs_info
->chunk_tree_uuid
,
4303 btrfs_header_chunk_tree_uuid(right
),
4308 btrfs_set_header_nritems(right
, 0);
4309 insert_ptr(trans
, root
, path
, &disk_key
, right
->start
,
4310 path
->slots
[1] + 1, 1);
4311 btrfs_tree_unlock(path
->nodes
[0]);
4312 free_extent_buffer(path
->nodes
[0]);
4313 path
->nodes
[0] = right
;
4315 path
->slots
[1] += 1;
4317 btrfs_set_header_nritems(right
, 0);
4318 insert_ptr(trans
, root
, path
, &disk_key
, right
->start
,
4320 btrfs_tree_unlock(path
->nodes
[0]);
4321 free_extent_buffer(path
->nodes
[0]);
4322 path
->nodes
[0] = right
;
4324 if (path
->slots
[1] == 0)
4325 fixup_low_keys(root
, path
, &disk_key
, 1);
4327 btrfs_mark_buffer_dirty(right
);
4331 copy_for_split(trans
, root
, path
, l
, right
, slot
, mid
, nritems
);
4334 BUG_ON(num_doubles
!= 0);
4342 push_for_double_split(trans
, root
, path
, data_size
);
4343 tried_avoid_double
= 1;
4344 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= data_size
)
4349 static noinline
int setup_leaf_for_split(struct btrfs_trans_handle
*trans
,
4350 struct btrfs_root
*root
,
4351 struct btrfs_path
*path
, int ins_len
)
4353 struct btrfs_key key
;
4354 struct extent_buffer
*leaf
;
4355 struct btrfs_file_extent_item
*fi
;
4360 leaf
= path
->nodes
[0];
4361 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
4363 BUG_ON(key
.type
!= BTRFS_EXTENT_DATA_KEY
&&
4364 key
.type
!= BTRFS_EXTENT_CSUM_KEY
);
4366 if (btrfs_leaf_free_space(root
, leaf
) >= ins_len
)
4369 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4370 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4371 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4372 struct btrfs_file_extent_item
);
4373 extent_len
= btrfs_file_extent_num_bytes(leaf
, fi
);
4375 btrfs_release_path(path
);
4377 path
->keep_locks
= 1;
4378 path
->search_for_split
= 1;
4379 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
4380 path
->search_for_split
= 0;
4385 leaf
= path
->nodes
[0];
4386 /* if our item isn't there or got smaller, return now */
4387 if (ret
> 0 || item_size
!= btrfs_item_size_nr(leaf
, path
->slots
[0]))
4390 /* the leaf has changed, it now has room. return now */
4391 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= ins_len
)
4394 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
4395 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
4396 struct btrfs_file_extent_item
);
4397 if (extent_len
!= btrfs_file_extent_num_bytes(leaf
, fi
))
4401 btrfs_set_path_blocking(path
);
4402 ret
= split_leaf(trans
, root
, &key
, path
, ins_len
, 1);
4406 path
->keep_locks
= 0;
4407 btrfs_unlock_up_safe(path
, 1);
4410 path
->keep_locks
= 0;
4414 static noinline
int split_item(struct btrfs_trans_handle
*trans
,
4415 struct btrfs_root
*root
,
4416 struct btrfs_path
*path
,
4417 struct btrfs_key
*new_key
,
4418 unsigned long split_offset
)
4420 struct extent_buffer
*leaf
;
4421 struct btrfs_item
*item
;
4422 struct btrfs_item
*new_item
;
4428 struct btrfs_disk_key disk_key
;
4430 leaf
= path
->nodes
[0];
4431 BUG_ON(btrfs_leaf_free_space(root
, leaf
) < sizeof(struct btrfs_item
));
4433 btrfs_set_path_blocking(path
);
4435 item
= btrfs_item_nr(path
->slots
[0]);
4436 orig_offset
= btrfs_item_offset(leaf
, item
);
4437 item_size
= btrfs_item_size(leaf
, item
);
4439 buf
= kmalloc(item_size
, GFP_NOFS
);
4443 read_extent_buffer(leaf
, buf
, btrfs_item_ptr_offset(leaf
,
4444 path
->slots
[0]), item_size
);
4446 slot
= path
->slots
[0] + 1;
4447 nritems
= btrfs_header_nritems(leaf
);
4448 if (slot
!= nritems
) {
4449 /* shift the items */
4450 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ 1),
4451 btrfs_item_nr_offset(slot
),
4452 (nritems
- slot
) * sizeof(struct btrfs_item
));
4455 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
4456 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4458 new_item
= btrfs_item_nr(slot
);
4460 btrfs_set_item_offset(leaf
, new_item
, orig_offset
);
4461 btrfs_set_item_size(leaf
, new_item
, item_size
- split_offset
);
4463 btrfs_set_item_offset(leaf
, item
,
4464 orig_offset
+ item_size
- split_offset
);
4465 btrfs_set_item_size(leaf
, item
, split_offset
);
4467 btrfs_set_header_nritems(leaf
, nritems
+ 1);
4469 /* write the data for the start of the original item */
4470 write_extent_buffer(leaf
, buf
,
4471 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4474 /* write the data for the new item */
4475 write_extent_buffer(leaf
, buf
+ split_offset
,
4476 btrfs_item_ptr_offset(leaf
, slot
),
4477 item_size
- split_offset
);
4478 btrfs_mark_buffer_dirty(leaf
);
4480 BUG_ON(btrfs_leaf_free_space(root
, leaf
) < 0);
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation. After
 * the split, the path is pointing to the old item. The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
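/*
 * Concretely: for an item of item_size bytes, the split keeps bytes
 * [0, split_offset) in the original item (which keeps its key) and moves
 * bytes [split_offset, item_size) into a new item inserted in the next
 * slot under 'new_key', so the leaf afterwards holds two adjacent items
 * whose payloads concatenate back to the original data.
 */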
4500 int btrfs_split_item(struct btrfs_trans_handle
*trans
,
4501 struct btrfs_root
*root
,
4502 struct btrfs_path
*path
,
4503 struct btrfs_key
*new_key
,
4504 unsigned long split_offset
)
4507 ret
= setup_leaf_for_split(trans
, root
, path
,
4508 sizeof(struct btrfs_item
));
4512 ret
= split_item(trans
, root
, path
, new_key
, split_offset
);
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
4524 int btrfs_duplicate_item(struct btrfs_trans_handle
*trans
,
4525 struct btrfs_root
*root
,
4526 struct btrfs_path
*path
,
4527 struct btrfs_key
*new_key
)
4529 struct extent_buffer
*leaf
;
4533 leaf
= path
->nodes
[0];
4534 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
4535 ret
= setup_leaf_for_split(trans
, root
, path
,
4536 item_size
+ sizeof(struct btrfs_item
));
4541 setup_items_for_insert(root
, path
, new_key
, &item_size
,
4542 item_size
, item_size
+
4543 sizeof(struct btrfs_item
), 1);
4544 leaf
= path
->nodes
[0];
4545 memcpy_extent_buffer(leaf
,
4546 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
4547 btrfs_item_ptr_offset(leaf
, path
->slots
[0] - 1),
/*
 * make the item pointed to by the path smaller. new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
4558 void btrfs_truncate_item(struct btrfs_root
*root
, struct btrfs_path
*path
,
4559 u32 new_size
, int from_end
)
4562 struct extent_buffer
*leaf
;
4563 struct btrfs_item
*item
;
4565 unsigned int data_end
;
4566 unsigned int old_data_start
;
4567 unsigned int old_size
;
4568 unsigned int size_diff
;
4570 struct btrfs_map_token token
;
4572 btrfs_init_map_token(&token
);
4574 leaf
= path
->nodes
[0];
4575 slot
= path
->slots
[0];
4577 old_size
= btrfs_item_size_nr(leaf
, slot
);
4578 if (old_size
== new_size
)
4581 nritems
= btrfs_header_nritems(leaf
);
4582 data_end
= leaf_data_end(root
, leaf
);
4584 old_data_start
= btrfs_item_offset_nr(leaf
, slot
);
4586 size_diff
= old_size
- new_size
;
4589 BUG_ON(slot
>= nritems
);
4592 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4594 /* first correct the data pointers */
4595 for (i
= slot
; i
< nritems
; i
++) {
4597 item
= btrfs_item_nr(i
);
4599 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4600 btrfs_set_token_item_offset(leaf
, item
,
4601 ioff
+ size_diff
, &token
);
4604 /* shift the data */
4606 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4607 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
4608 data_end
, old_data_start
+ new_size
- data_end
);
4610 struct btrfs_disk_key disk_key
;
4613 btrfs_item_key(leaf
, &disk_key
, slot
);
4615 if (btrfs_disk_key_type(&disk_key
) == BTRFS_EXTENT_DATA_KEY
) {
4617 struct btrfs_file_extent_item
*fi
;
4619 fi
= btrfs_item_ptr(leaf
, slot
,
4620 struct btrfs_file_extent_item
);
4621 fi
= (struct btrfs_file_extent_item
*)(
4622 (unsigned long)fi
- size_diff
);
4624 if (btrfs_file_extent_type(leaf
, fi
) ==
4625 BTRFS_FILE_EXTENT_INLINE
) {
4626 ptr
= btrfs_item_ptr_offset(leaf
, slot
);
4627 memmove_extent_buffer(leaf
, ptr
,
4629 offsetof(struct btrfs_file_extent_item
,
4634 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4635 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
4636 data_end
, old_data_start
- data_end
);
4638 offset
= btrfs_disk_key_offset(&disk_key
);
4639 btrfs_set_disk_key_offset(&disk_key
, offset
+ size_diff
);
4640 btrfs_set_item_key(leaf
, &disk_key
, slot
);
4642 fixup_low_keys(root
, path
, &disk_key
, 1);
4645 item
= btrfs_item_nr(slot
);
4646 btrfs_set_item_size(leaf
, item
, new_size
);
4647 btrfs_mark_buffer_dirty(leaf
);
4649 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
4650 btrfs_print_leaf(root
, leaf
);
/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
4658 void btrfs_extend_item(struct btrfs_root
*root
, struct btrfs_path
*path
,
4662 struct extent_buffer
*leaf
;
4663 struct btrfs_item
*item
;
4665 unsigned int data_end
;
4666 unsigned int old_data
;
4667 unsigned int old_size
;
4669 struct btrfs_map_token token
;
4671 btrfs_init_map_token(&token
);
4673 leaf
= path
->nodes
[0];
4675 nritems
= btrfs_header_nritems(leaf
);
4676 data_end
= leaf_data_end(root
, leaf
);
4678 if (btrfs_leaf_free_space(root
, leaf
) < data_size
) {
4679 btrfs_print_leaf(root
, leaf
);
4682 slot
= path
->slots
[0];
4683 old_data
= btrfs_item_end_nr(leaf
, slot
);
4686 if (slot
>= nritems
) {
4687 btrfs_print_leaf(root
, leaf
);
4688 btrfs_crit(root
->fs_info
, "slot %d too large, nritems %d",
4694 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4696 /* first correct the data pointers */
4697 for (i
= slot
; i
< nritems
; i
++) {
4699 item
= btrfs_item_nr(i
);
4701 ioff
= btrfs_token_item_offset(leaf
, item
, &token
);
4702 btrfs_set_token_item_offset(leaf
, item
,
4703 ioff
- data_size
, &token
);
4706 /* shift the data */
4707 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
4708 data_end
- data_size
, btrfs_leaf_data(leaf
) +
4709 data_end
, old_data
- data_end
);
4711 data_end
= old_data
;
4712 old_size
= btrfs_item_size_nr(leaf
, slot
);
4713 item
= btrfs_item_nr(slot
);
4714 btrfs_set_item_size(leaf
, item
, old_size
+ data_size
);
4715 btrfs_mark_buffer_dirty(leaf
);
4717 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
4718 btrfs_print_leaf(root
, leaf
);
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;

	if (path->slots[0] == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
			   total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
				   slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(root, path, cpu_key, data_size,
			       total_data, total_size, nr);
	return 0;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
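
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * a minimal caller of btrfs_insert_item(); objectid, data and MY_ITEM_KEY
 * are invented for illustration:
 *
 *	struct btrfs_key key;
 *
 *	key.objectid = objectid;
 *	key.type = MY_ITEM_KEY;
 *	key.offset = 0;
 *	ret = btrfs_insert_item(trans, root, &key, &data, sizeof(data));
 *	if (ret)
 *		return ret;
 *
 * btrfs_insert_item() allocates and frees its own path, so it only suits
 * callers that do not need to touch the inserted item afterwards;
 * otherwise use btrfs_insert_empty_item() and write into the returned
 * slot, as the body above does.
 */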
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level)
			tree_mod_log_eb_move(root->fs_info, parent, slot,
					     slot + 1, nritems - slot - 1);
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(root, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing. path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(root, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
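
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * a typical caller deletes whatever the path currently points at, e.g.
 * right after a successful search:
 *
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *
 * (this single-item case is what the btrfs_del_item() helper wraps).
 * Deletion may rebalance or free the leaf, so pointers into it are stale
 * afterwards and the path should normally be released.
 */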
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}
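
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * the key-decrement at the top of btrfs_prev_leaf() computes the largest
 * key strictly smaller than the first key of the current leaf. For
 * example, if the leaf starts with (256, BTRFS_INODE_ITEM_KEY, 0), then
 * offset is already 0, the type (1) is decremented, and the search key
 * becomes (256, 0, (u64)-1); only when objectid, type and offset are all
 * zero is there nothing before us and 1 is returned.
 */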
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						  min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1, 0, NULL);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}
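
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * how a walker such as defrag might drive btrfs_search_forward() to visit
 * only the parts of a tree touched since min_trans; min_key acts as both
 * cursor and result:
 *
 *	struct btrfs_key min_key = { 0 };	// start at the first key
 *
 *	path->keep_locks = 1;
 *	while (btrfs_search_forward(root, &min_key, path, min_trans) == 0) {
 *		// ... process what was found at path->lowest_level ...
 *		btrfs_release_path(path);
 *		if (min_key.offset == (u64)-1)
 *			break;
 *		min_key.offset++;	// advance the cursor (simplified)
 *	}
 *
 * real callers advance the key more carefully (offset, then type, then
 * objectid) so nothing is skipped or revisited.
 */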
static void tree_move_down(struct btrfs_root *root,
			   struct btrfs_path *path,
			   int *level, int root_level)
{
	BUG_ON(*level == 0);
	path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
					path->slots[*level]);
	path->slots[*level - 1] = 0;
	(*level)--;
}

static int tree_move_next_or_upnext(struct btrfs_root *root,
				    struct btrfs_path *path,
				    int *level, int root_level)
{
	int ret = 0;
	int nritems;
	nritems = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	while (path->slots[*level] >= nritems) {
		if (*level == root_level)
			return -1;

		/* move upnext */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		nritems = btrfs_header_nritems(path->nodes[*level]);
		ret = 1;
	}
	return ret;
}
/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down into a tree block.
 */
static int tree_advance(struct btrfs_root *root,
			struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key)
{
	int ret;

	if (*level == 0 || !allow_down) {
		ret = tree_move_next_or_upnext(root, path, level, root_level);
	} else {
		tree_move_down(root, path, level, root_level);
		ret = 0;
	}
	if (ret >= 0) {
		if (*level == 0)
			btrfs_item_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
		else
			btrfs_node_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
	}
	return ret;
}
static int tree_compare_item(struct btrfs_root *left_root,
			     struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	int cmp;
	int len1, len2;
	unsigned long off1, off2;

	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;

	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
					right_path->slots[0]);

	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
}
#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t changed_cb, void *ctx)
{
	int ret;
	int cmp;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached;
	int right_end_reached;
	int advance_left;
	int advance_right;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_gen;
	u64 right_gen;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_buf = kmalloc(left_root->nodesize, GFP_NOFS);
	if (!tmp_buf) {
		ret = -ENOMEM;
		goto out;
	}

	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;

	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leafs
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leafs
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we found a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   slot to the right if possible or go up and right.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */

	down_read(&left_root->fs_info->commit_root_sem);
	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	left_path->nodes[left_level] = left_root->commit_root;
	extent_buffer_get(left_path->nodes[left_level]);

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] = right_root->commit_root;
	extent_buffer_get(right_path->nodes[right_level]);
	up_read(&left_root->fs_info->commit_root_sem);

	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);

	left_end_reached = right_end_reached = 0;
	advance_left = advance_right = 0;

	while (1) {
		if (advance_left && !left_end_reached) {
			ret = tree_advance(left_root, left_path, &left_level,
					left_root_level,
					advance_left != ADVANCE_ONLY_NEXT,
					&left_key);
			if (ret < 0)
				left_end_reached = ADVANCE;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(right_root, right_path, &right_level,
					right_root_level,
					advance_right != ADVANCE_ONLY_NEXT,
					&right_key);
			if (ret < 0)
				right_end_reached = ADVANCE;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out;
		} else if (left_end_reached) {
			if (right_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			if (left_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
				advance_right = ADVANCE;
			} else {
				enum btrfs_compare_tree_result cmp;

				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
				ret = tree_compare_item(left_root, left_path,
						right_path, tmp_buf);
				if (ret)
					cmp = BTRFS_COMPARE_TREE_CHANGED;
				else
					cmp = BTRFS_COMPARE_TREE_SAME;
				ret = changed_cb(left_root, right_root,
						 left_path, right_path,
						 &left_key, cmp, ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				left_gen = btrfs_node_ptr_generation(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_gen = btrfs_node_ptr_generation(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr &&
				    left_gen == right_gen) {
					/*
					 * As we're on a shared block, don't
					 * allow to go deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kfree(tmp_buf);
	return ret;
}
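
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * send is the in-tree user of btrfs_compare_trees(); a caller supplies a
 * btrfs_changed_cb_t and is told, key by key, whether an item is new,
 * deleted, changed or the same between the two commit roots. The names
 * my_changed_cb/my_ctx below are invented for illustration:
 *
 *	static int my_changed_cb(struct btrfs_root *left_root,
 *				 struct btrfs_root *right_root,
 *				 struct btrfs_path *left_path,
 *				 struct btrfs_path *right_path,
 *				 struct btrfs_key *key,
 *				 enum btrfs_compare_tree_result result,
 *				 void *ctx)
 *	{
 *		// react to BTRFS_COMPARE_TREE_NEW/DELETED/CHANGED/SAME
 *		return 0;	// a negative return aborts the walk
 *	}
 *
 *	ret = btrfs_compare_trees(send_root, parent_root, my_changed_cb,
 *				  my_ctx);
 */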
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
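
/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * the usual pattern for iterating items from a key onwards, using
 * btrfs_next_leaf() to hop across leaf boundaries:
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)	// 1 means no more leaves
 *				break;
 *			leaf = path->nodes[0];
 *		}
 *		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 *		// ... examine the item, stop once key is out of range ...
 *		path->slots[0]++;
 *	}
 *
 * btrfs_next_leaf() may release and re-acquire the path, so any cached
 * leaf pointers must be reloaded after it returns.
 */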
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leafs
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later(esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check,
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY