/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/vmalloc.h>
#include "transaction.h"
#include "print-tree.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot);
static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
				struct extent_buffer *eb);
struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;

	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that have no locks or extent buffers held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i])
			continue;
		if (p->locks[i])
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);
		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was COWed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}
/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
	    !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
		return;

	spin_lock(&root->fs_info->trans_lock);
	if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
		/* Want the extent tree to be the last on the list */
		if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
			list_move_tail(&root->dirty_list,
				       &root->fs_info->dirty_cowonly_roots);
		else
			list_move(&root->dirty_list,
				  &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root,
		    struct extent_buffer *buf,
		    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
	struct extent_buffer *cow;
	int ret = 0;
	int level;
	struct btrfs_disk_key disk_key;

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);
	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
				     &disk_key, level, buf->start, 0);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, new_root_objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	WARN_ON(btrfs_header_generation(buf) > trans->transid);
	if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
		ret = btrfs_inc_ref(trans, root, cow, 1);
	else
		ret = btrfs_inc_ref(trans, root, cow, 0);
	if (ret)
		return ret;

	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
enum mod_log_op {
	MOD_LOG_KEY_REPLACE,
	MOD_LOG_KEY_ADD,
	MOD_LOG_KEY_REMOVE,
	MOD_LOG_KEY_REMOVE_WHILE_FREEING,
	MOD_LOG_KEY_REMOVE_WHILE_MOVING,
	MOD_LOG_MOVE_KEYS,
	MOD_LOG_ROOT_REPLACE,
};

struct tree_mod_move {
	int dst_slot;
	int nr_items;
};

struct tree_mod_root {
	u64 logical;
	u8 level;
};

struct tree_mod_elem {
	struct rb_node node;
	u64 logical;
	u64 seq;
	enum mod_log_op op;

	/* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
	int slot;

	/* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
	u64 generation;

	/* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
	struct btrfs_disk_key key;
	u64 blockptr;

	/* this is used for op == MOD_LOG_MOVE_KEYS */
	struct tree_mod_move move;

	/* this is used for op == MOD_LOG_ROOT_REPLACE */
	struct tree_mod_root old_root;
};
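
/*
 * The tree mod log itself is an rb-tree of tree_mod_elem entries, ordered by
 * (logical block address, seq).  It is protected by tree_mod_log_lock, which
 * is taken through the helpers below.
 */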
static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
{
	read_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
{
	read_unlock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
{
	write_lock(&fs_info->tree_mod_log_lock);
}

static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
{
	write_unlock(&fs_info->tree_mod_log_lock);
}
/*
 * Pull a new tree mod seq number for our operation.
 */
static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
{
	return atomic64_inc_return(&fs_info->tree_mod_seq);
}
/*
 * This adds a new blocker to the tree mod log's blocker list if the @elem
 * passed does not already have a sequence number set. So when a caller expects
 * to record tree modifications, it should ensure to set elem->seq to zero
 * before calling btrfs_get_tree_mod_seq.
 * Returns a fresh, unused tree log modification sequence number, even if no new
 * blocker was added.
 */
u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
			   struct seq_list *elem)
{
	tree_mod_log_write_lock(fs_info);
	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!elem->seq) {
		elem->seq = btrfs_inc_tree_mod_seq(fs_info);
		list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);
	tree_mod_log_write_unlock(fs_info);

	return elem->seq;
}
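
/*
 * Drop a blocker previously registered with btrfs_get_tree_mod_seq.  If it
 * was the oldest blocker, tree mod log entries older than the new minimum
 * sequence number are no longer needed and are pruned from the rb-tree.
 */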
void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
			    struct seq_list *elem)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct rb_node *next;
	struct seq_list *cur_elem;
	struct tree_mod_elem *tm;
	u64 min_seq = (u64)-1;
	u64 seq_putting = elem->seq;

	spin_lock(&fs_info->tree_mod_seq_lock);
	list_del(&elem->list);

	list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
		if (cur_elem->seq < min_seq) {
			if (seq_putting > cur_elem->seq) {
				/*
				 * blocker with lower sequence number exists, we
				 * cannot remove anything from the log
				 */
				spin_unlock(&fs_info->tree_mod_seq_lock);
				return;
			}
			min_seq = cur_elem->seq;
		}
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	/*
	 * anything that's lower than the lowest existing (read: blocked)
	 * sequence number can be removed from the tree.
	 */
	tree_mod_log_write_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	for (node = rb_first(tm_root); node; node = next) {
		next = rb_next(node);
		tm = container_of(node, struct tree_mod_elem, node);
		if (tm->seq > min_seq)
			continue;
		rb_erase(node, tm_root);
	}
	tree_mod_log_write_unlock(fs_info);
}
/*
 * key order of the log:
 *       node/leaf start address -> sequence
 *
 * The 'start address' is the logical address of the *new* root node
 * for root replace operations, or the logical address of the affected
 * block for all other operations.
 *
 * Note: must be called with write lock (tree_mod_log_write_lock).
 */
static noinline int
__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
{
	struct rb_root *tm_root;
	struct rb_node **new;
	struct rb_node *parent = NULL;
	struct tree_mod_elem *cur;

	tm->seq = btrfs_inc_tree_mod_seq(fs_info);

	tm_root = &fs_info->tree_mod_log;
	new = &tm_root->rb_node;
	while (*new) {
		cur = container_of(*new, struct tree_mod_elem, node);
		parent = *new;
		if (cur->logical < tm->logical)
			new = &((*new)->rb_left);
		else if (cur->logical > tm->logical)
			new = &((*new)->rb_right);
		else if (cur->seq < tm->seq)
			new = &((*new)->rb_left);
		else if (cur->seq > tm->seq)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&tm->node, parent, new);
	rb_insert_color(&tm->node, tm_root);
	return 0;
}
/*
 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
 * returns zero with the tree_mod_log_lock acquired. The caller must hold
 * this until all tree mod log insertions are recorded in the rb tree and then
 * call tree_mod_log_write_unlock() to release.
 */
static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb) {
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 1;
	if (eb && btrfs_header_level(eb) == 0)
		return 1;

	tree_mod_log_write_lock(fs_info);
	if (list_empty(&(fs_info)->tree_mod_seq_list)) {
		tree_mod_log_write_unlock(fs_info);
		return 1;
	}

	return 0;
}
/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
				    struct extent_buffer *eb)
{
	if (list_empty(&(fs_info)->tree_mod_seq_list))
		return 0;
	if (eb && btrfs_header_level(eb) == 0)
		return 0;

	return 1;
}
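
/*
 * Allocate a tree mod log element describing the given operation on @slot of
 * @eb.  For anything but a plain key addition the current key, block pointer
 * and generation are recorded so the operation can later be rewound.
 */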
static struct tree_mod_elem *
alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
		    enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm)
		return NULL;

	tm->logical = eb->start;
	if (op != MOD_LOG_KEY_ADD) {
		btrfs_node_key(eb, &tm->key, slot);
		tm->blockptr = btrfs_node_blockptr(eb, slot);
	}
	tm->op = op;
	tm->slot = slot;
	tm->generation = btrfs_node_ptr_generation(eb, slot);
	RB_CLEAR_NODE(&tm->node);

	return tm;
}
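
/* Record a single key operation (add/remove/replace) for @slot of @eb. */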
static noinline int
tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
			struct extent_buffer *eb, int slot,
			enum mod_log_op op, gfp_t flags)
{
	struct tree_mod_elem *tm;
	int ret;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm = alloc_tree_mod_elem(eb, slot, op, flags);
	if (!tm)
		return -ENOMEM;

	if (tree_mod_dont_log(fs_info, eb)) {
		kfree(tm);
		return 0;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		kfree(tm);

	return ret;
}
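
/*
 * Record a move of @nr_items node pointers from @src_slot to @dst_slot within
 * the same buffer.  Slots that get overwritten by a move towards the front of
 * the buffer are additionally logged as removals.
 */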
static noinline int
tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int dst_slot, int src_slot,
			 int nr_items, gfp_t flags)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int ret = 0;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, eb))
		return 0;

	tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
	if (!tm_list)
		return -ENOMEM;

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = eb->start;
	tm->move.dst_slot = dst_slot;
	tm->move.nr_items = nr_items;
	tm->op = MOD_LOG_MOVE_KEYS;

	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
				MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;
	locked = 1;

	/*
	 * When we override something during the move, we log these removals.
	 * This can only happen when we move towards the beginning of the
	 * buffer, i.e. dst_slot < src_slot.
	 */
	for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret)
			goto free_tms;
	}

	ret = __tree_mod_log_insert(fs_info, tm);
	if (ret)
		goto free_tms;
	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;
free_tms:
	for (i = 0; i < nr_items; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);
	kfree(tm);

	return ret;
}
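
/*
 * Insert a pre-allocated list of tree mod log elements; if one insertion
 * fails, the elements already inserted are removed again.  The caller must
 * hold the tree mod log write lock.
 */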
static inline int
__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
		       struct tree_mod_elem **tm_list,
		       int nritems)
{
	int i, j;
	int ret;

	for (i = nritems - 1; i >= 0; i--) {
		ret = __tree_mod_log_insert(fs_info, tm_list[i]);
		if (ret) {
			for (j = nritems - 1; j > i; j--)
				rb_erase(&tm_list[j]->node,
					 &fs_info->tree_mod_log);
			return ret;
		}
	}

	return 0;
}
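
/*
 * Record a root replacement.  When @log_removal is set, the items of the old
 * root are logged as removed-while-freeing so rewinding can restore them.
 */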
static noinline int
tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *old_root,
			 struct extent_buffer *new_root, gfp_t flags,
			 int log_removal)
{
	struct tree_mod_elem *tm = NULL;
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int ret = 0;
	int i;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (log_removal && btrfs_header_level(old_root) > 0) {
		nritems = btrfs_header_nritems(old_root);
		tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
				  flags);
		if (!tm_list) {
			ret = -ENOMEM;
			goto free_tms;
		}
		for (i = 0; i < nritems; i++) {
			tm_list[i] = alloc_tree_mod_elem(old_root, i,
			    MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
			if (!tm_list[i]) {
				ret = -ENOMEM;
				goto free_tms;
			}
		}
	}

	tm = kzalloc(sizeof(*tm), flags);
	if (!tm) {
		ret = -ENOMEM;
		goto free_tms;
	}

	tm->logical = new_root->start;
	tm->old_root.logical = old_root->start;
	tm->old_root.level = btrfs_header_level(old_root);
	tm->generation = btrfs_header_generation(old_root);
	tm->op = MOD_LOG_ROOT_REPLACE;

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;

	if (tm_list)
		ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	if (!ret)
		ret = __tree_mod_log_insert(fs_info, tm);

	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return ret;

free_tms:
	if (tm_list) {
		for (i = 0; i < nritems; i++)
			kfree(tm_list[i]);
		kfree(tm_list);
	}
	kfree(tm);

	return ret;
}
static struct tree_mod_elem *
__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
		      int smallest)
{
	struct rb_root *tm_root;
	struct rb_node *node;
	struct tree_mod_elem *cur = NULL;
	struct tree_mod_elem *found = NULL;

	tree_mod_log_read_lock(fs_info);
	tm_root = &fs_info->tree_mod_log;
	node = tm_root->rb_node;
	while (node) {
		cur = container_of(node, struct tree_mod_elem, node);
		if (cur->logical < start) {
			node = node->rb_left;
		} else if (cur->logical > start) {
			node = node->rb_right;
		} else if (cur->seq < min_seq) {
			node = node->rb_left;
		} else if (!smallest) {
			/* we want the node with the highest seq */
			if (found)
				BUG_ON(found->seq > cur->seq);
			found = cur;
			node = node->rb_left;
		} else if (cur->seq > min_seq) {
			/* we want the node with the smallest seq */
			if (found)
				BUG_ON(found->seq < cur->seq);
			found = cur;
			node = node->rb_right;
		} else {
			found = cur;
			break;
		}
	}
	tree_mod_log_read_unlock(fs_info);

	return found;
}
/*
 * this returns the element from the log with the smallest time sequence
 * value that's in the log (the oldest log item). any element with a time
 * sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
			   u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 1);
}

/*
 * this returns the element from the log with the largest time sequence
 * value that's in the log (the most recent log item). any element with
 * a time sequence lower than min_seq will be ignored.
 */
static struct tree_mod_elem *
tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
{
	return __tree_mod_log_search(fs_info, start, min_seq, 0);
}
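
/*
 * When @nr_items are copied from @src to @dst, log them as removals on the
 * source buffer and additions on the destination buffer.
 */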
static noinline int
tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     struct extent_buffer *src, unsigned long dst_offset,
		     unsigned long src_offset, int nr_items)
{
	int ret = 0;
	struct tree_mod_elem **tm_list = NULL;
	struct tree_mod_elem **tm_list_add, **tm_list_rem;
	int i;
	int locked = 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
		return 0;

	tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
			  GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	tm_list_add = tm_list;
	tm_list_rem = tm_list + nr_items;
	for (i = 0; i < nr_items; i++) {
		tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
		    MOD_LOG_KEY_REMOVE, GFP_NOFS);
		if (!tm_list_rem[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}

		tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
		    MOD_LOG_KEY_ADD, GFP_NOFS);
		if (!tm_list_add[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, NULL))
		goto free_tms;
	locked = 1;

	for (i = 0; i < nr_items; i++) {
		ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
		if (ret)
			goto free_tms;
		ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
		if (ret)
			goto free_tms;
	}

	tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nr_items * 2; i++) {
		if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
			rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
		kfree(tm_list[i]);
	}
	if (locked)
		tree_mod_log_write_unlock(fs_info);
	kfree(tm_list);

	return ret;
}
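
/* convenience wrappers used by the tree update code below */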
static inline void
tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
		     int dst_offset, int src_offset, int nr_items)
{
	int ret;

	ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
				       nr_items, GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline void
tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
			  struct extent_buffer *eb, int slot, int atomic)
{
	int ret;

	ret = tree_mod_log_insert_key(fs_info, eb, slot,
				      MOD_LOG_KEY_REPLACE,
				      atomic ? GFP_ATOMIC : GFP_NOFS);
	BUG_ON(ret < 0);
}

static noinline int
tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
{
	struct tree_mod_elem **tm_list = NULL;
	int nritems = 0;
	int i;
	int ret = 0;

	if (btrfs_header_level(eb) == 0)
		return 0;

	if (!tree_mod_need_log(fs_info, NULL))
		return 0;

	nritems = btrfs_header_nritems(eb);
	tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
	if (!tm_list)
		return -ENOMEM;

	for (i = 0; i < nritems; i++) {
		tm_list[i] = alloc_tree_mod_elem(eb, i,
		    MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
		if (!tm_list[i]) {
			ret = -ENOMEM;
			goto free_tms;
		}
	}

	if (tree_mod_dont_log(fs_info, eb))
		goto free_tms;

	ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
	tree_mod_log_write_unlock(fs_info);
	if (ret)
		goto free_tms;
	kfree(tm_list);

	return 0;

free_tms:
	for (i = 0; i < nritems; i++)
		kfree(tm_list[i]);
	kfree(tm_list);

	return ret;
}

static noinline void
tree_mod_log_set_root_pointer(struct btrfs_root *root,
			      struct extent_buffer *new_root_node,
			      int log_removal)
{
	int ret;

	ret = tree_mod_log_insert_root(root->fs_info, root->node,
				       new_root_node, GFP_NOFS, log_removal);
	BUG_ON(ret < 0);
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared. If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct extent_buffer *buf,
				       struct extent_buffer *cow,
				       int *last_ref)
{
	u64 refs;
	u64 owner;
	u64 flags;
	u64 new_flags = 0;
	int ret;

	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree block
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in tree block.
	 * The reason for this is some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */

	if (btrfs_block_can_be_shared(root, buf)) {
		ret = btrfs_lookup_extent_info(trans, root, buf->start,
					       btrfs_header_level(buf), 1,
					       &refs, &flags);
		if (ret)
			return ret;
		if (refs == 0) {
			ret = -EROFS;
			btrfs_handle_fs_error(root->fs_info, ret, NULL);
			return ret;
		}
	} else {
		refs = 1;
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
		else
			flags = 0;
	}

	owner = btrfs_header_owner(buf);
	BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
	       !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));

	if (refs > 1) {
		if ((owner == root->root_key.objectid ||
		     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
		    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
			ret = btrfs_inc_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */

			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID) {
				ret = btrfs_dec_ref(trans, root, buf, 0);
				BUG_ON(ret); /* -ENOMEM */
				ret = btrfs_inc_ref(trans, root, cow, 1);
				BUG_ON(ret); /* -ENOMEM */
			}
			new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
		} else {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
		}
		if (new_flags != 0) {
			int level = btrfs_header_level(buf);

			ret = btrfs_set_disk_extent_flags(trans, root,
							  buf->start, buf->len,
							  new_flags, level, 0);
			if (ret)
				return ret;
		}
	} else {
		if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
			if (root->root_key.objectid ==
			    BTRFS_TREE_RELOC_OBJECTID)
				ret = btrfs_inc_ref(trans, root, cow, 1);
			else
				ret = btrfs_inc_ref(trans, root, cow, 0);
			BUG_ON(ret); /* -ENOMEM */
			ret = btrfs_dec_ref(trans, root, buf, 1);
			BUG_ON(ret); /* -ENOMEM */
		}
		clean_tree_block(trans, root->fs_info, buf);
		*last_ref = 1;
	}
	return 0;
}
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct extent_buffer *buf,
			     struct extent_buffer *parent, int parent_slot,
			     struct extent_buffer **cow_ret,
			     u64 search_start, u64 empty_size)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *cow;
	int level, ret;
	int last_ref = 0;
	int unlock_orig = 0;
	u64 parent_start;

	if (*cow_ret == buf)
		unlock_orig = 1;

	btrfs_assert_tree_locked(buf);

	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->fs_info->running_transaction->transid);
	WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
		trans->transid != root->last_trans);

	level = btrfs_header_level(buf);

	if (level == 0)
		btrfs_item_key(buf, &disk_key, 0);
	else
		btrfs_node_key(buf, &disk_key, 0);

	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
		if (parent)
			parent_start = parent->start;
		else
			parent_start = 0;
	} else
		parent_start = 0;

	cow = btrfs_alloc_tree_block(trans, root, parent_start,
				     root->root_key.objectid, &disk_key, level,
				     search_start, empty_size);
	if (IS_ERR(cow))
		return PTR_ERR(cow);

	/* cow is set to blocking by btrfs_init_new_buffer */

	copy_extent_buffer(cow, buf, 0, 0, cow->len);
	btrfs_set_header_bytenr(cow, cow->start);
	btrfs_set_header_generation(cow, trans->transid);
	btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
	btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
				     BTRFS_HEADER_FLAG_RELOC);
	if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
		btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
	else
		btrfs_set_header_owner(cow, root->root_key.objectid);

	write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
			    BTRFS_FSID_SIZE);

	ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
		ret = btrfs_reloc_cow_block(trans, root, buf, cow);
		if (ret) {
			btrfs_abort_transaction(trans, root, ret);
			return ret;
		}
	}

	if (buf == root->node) {
		WARN_ON(parent && parent != buf);
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
			parent_start = buf->start;
		else
			parent_start = 0;

		extent_buffer_get(cow);
		tree_mod_log_set_root_pointer(root, cow, 1);
		rcu_assign_pointer(root->node, cow);

		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
		free_extent_buffer(buf);
		add_root_to_dirty_list(root);
	} else {
		if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
			parent_start = parent->start;
		else
			parent_start = 0;

		WARN_ON(trans->transid != btrfs_header_generation(parent));
		tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
					MOD_LOG_KEY_REPLACE, GFP_NOFS);
		btrfs_set_node_blockptr(parent, parent_slot,
					cow->start);
		btrfs_set_node_ptr_generation(parent, parent_slot,
					      trans->transid);
		btrfs_mark_buffer_dirty(parent);
		if (last_ref) {
			ret = tree_mod_log_free_eb(root->fs_info, buf);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				return ret;
			}
		}
		btrfs_free_tree_block(trans, root, buf, parent_start,
				      last_ref);
	}
	if (unlock_orig)
		btrfs_tree_unlock(buf);
	free_extent_buffer_stale(buf);
	btrfs_mark_buffer_dirty(cow);
	*cow_ret = cow;
	return 0;
}
/*
 * returns the logical address of the oldest predecessor of the given root.
 * entries older than time_seq are ignored.
 */
static struct tree_mod_elem *
__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
			   struct extent_buffer *eb_root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct tree_mod_elem *found = NULL;
	u64 root_logical = eb_root->start;
	int looped = 0;

	if (!time_seq)
		return NULL;

	/*
	 * the very last operation that's logged for a root is the
	 * replacement operation (if it is replaced at all). this has
	 * the logical address of the *new* root, making it the very
	 * first operation that's logged for this root.
	 */
	while (1) {
		tm = tree_mod_log_search_oldest(fs_info, root_logical,
						time_seq);
		if (!looped && !tm)
			return NULL;
		/*
		 * if there are no tree operations for the oldest root, we simply
		 * return it. this should only happen if that (old) root is at
		 * level 0.
		 */
		if (!tm)
			break;

		/*
		 * if there's an operation that's not a root replacement, we
		 * found the oldest version of our root. normally, we'll find a
		 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
		 */
		if (tm->op != MOD_LOG_ROOT_REPLACE)
			break;

		found = tm;
		root_logical = tm->old_root.logical;
		looped = 1;
	}

	/* if there's no old root to return, return what we found instead */
	if (!found)
		found = tm;

	return found;
}
/*
 * tm is a pointer to the first operation to rewind within eb. then, all
 * previous operations will be rewound (until we reach something older than
 * time_seq).
 */
static void
__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
		      u64 time_seq, struct tree_mod_elem *first_tm)
{
	u32 n;
	struct rb_node *next;
	struct tree_mod_elem *tm = first_tm;
	unsigned long o_dst;
	unsigned long o_src;
	unsigned long p_size = sizeof(struct btrfs_key_ptr);

	n = btrfs_header_nritems(eb);
	tree_mod_log_read_lock(fs_info);
	while (tm && tm->seq >= time_seq) {
		/*
		 * all the operations are recorded with the operator used for
		 * the modification. as we're going backwards, we do the
		 * opposite of each operation here.
		 */
		switch (tm->op) {
		case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
			BUG_ON(tm->slot < n);
			/* Fallthrough */
		case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
		case MOD_LOG_KEY_REMOVE:
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			n++;
			break;
		case MOD_LOG_KEY_REPLACE:
			BUG_ON(tm->slot >= n);
			btrfs_set_node_key(eb, &tm->key, tm->slot);
			btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
			btrfs_set_node_ptr_generation(eb, tm->slot,
						      tm->generation);
			break;
		case MOD_LOG_KEY_ADD:
			/* if a move operation is needed it's in the log */
			n--;
			break;
		case MOD_LOG_MOVE_KEYS:
			o_dst = btrfs_node_key_ptr_offset(tm->slot);
			o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
			memmove_extent_buffer(eb, o_dst, o_src,
					      tm->move.nr_items * p_size);
			break;
		case MOD_LOG_ROOT_REPLACE:
			/*
			 * this operation is special. for roots, this must be
			 * handled explicitly before rewinding.
			 * for non-roots, this operation may exist if the node
			 * was a root: root A -> child B; then A gets empty and
			 * B is promoted to the new root. in the mod log, we'll
			 * have a root-replace operation for B, a tree block
			 * that is no root. we simply ignore that operation.
			 */
			break;
		}
		next = rb_next(&tm->node);
		if (!next)
			break;
		tm = container_of(next, struct tree_mod_elem, node);
		if (tm->logical != first_tm->logical)
			break;
	}
	tree_mod_log_read_unlock(fs_info);
	btrfs_set_header_nritems(eb, n);
}
/*
 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
 * is returned. If rewind operations happen, a fresh buffer is returned. The
 * returned buffer is always read-locked. If the returned buffer is not the
 * input buffer, the lock on the input buffer is released and the input buffer
 * is freed (its refcount is decremented).
 */
static struct extent_buffer *
tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
		    struct extent_buffer *eb, u64 time_seq)
{
	struct extent_buffer *eb_rewin;
	struct tree_mod_elem *tm;

	if (btrfs_header_level(eb) == 0)
		return eb;

	tm = tree_mod_log_search(fs_info, eb->start, time_seq);
	if (!tm)
		return eb;

	btrfs_set_path_blocking(path);
	btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);

	if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		BUG_ON(tm->slot != 0);
		eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start,
						     eb->len);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
		btrfs_set_header_bytenr(eb_rewin, eb->start);
		btrfs_set_header_backref_rev(eb_rewin,
					     btrfs_header_backref_rev(eb));
		btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
		btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
	} else {
		eb_rewin = btrfs_clone_extent_buffer(eb);
		if (!eb_rewin) {
			btrfs_tree_read_unlock_blocking(eb);
			free_extent_buffer(eb);
			return NULL;
		}
	}

	btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
	btrfs_tree_read_unlock_blocking(eb);
	free_extent_buffer(eb);

	extent_buffer_get(eb_rewin);
	btrfs_tree_read_lock(eb_rewin);
	__tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
	WARN_ON(btrfs_header_nritems(eb_rewin) >
		BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));

	return eb_rewin;
}
/*
 * get_old_root() rewinds the state of @root's root node to the given @time_seq
 * value. If there are no changes, the current root->root_node is returned. If
 * anything changed in between, there's a fresh buffer allocated on which the
 * rewind operations are done. In any case, the returned buffer is read locked.
 * Returns NULL on error (with no locks held).
 */
static inline struct extent_buffer *
get_old_root(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	struct extent_buffer *eb = NULL;
	struct extent_buffer *eb_root;
	struct extent_buffer *old;
	struct tree_mod_root *old_root = NULL;
	u64 old_generation = 0;
	u64 logical;

	eb_root = btrfs_read_lock_root_node(root);
	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (!tm)
		return eb_root;

	if (tm->op == MOD_LOG_ROOT_REPLACE) {
		old_root = &tm->old_root;
		old_generation = tm->generation;
		logical = old_root->logical;
	} else {
		logical = eb_root->start;
	}

	tm = tree_mod_log_search(root->fs_info, logical, time_seq);
	if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		old = read_tree_block(root, logical, 0);
		if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
			if (!IS_ERR(old))
				free_extent_buffer(old);
			btrfs_warn(root->fs_info,
				"failed to read tree block %llu from get_old_root", logical);
		} else {
			eb = btrfs_clone_extent_buffer(old);
			free_extent_buffer(old);
		}
	} else if (old_root) {
		btrfs_tree_read_unlock(eb_root);
		free_extent_buffer(eb_root);
		eb = alloc_dummy_extent_buffer(root->fs_info, logical,
					       root->nodesize);
	} else {
		btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
		eb = btrfs_clone_extent_buffer(eb_root);
		btrfs_tree_read_unlock_blocking(eb_root);
		free_extent_buffer(eb_root);
	}

	if (!eb)
		return NULL;
	extent_buffer_get(eb);
	btrfs_tree_read_lock(eb);
	if (old_root) {
		btrfs_set_header_bytenr(eb, eb->start);
		btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
		btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
		btrfs_set_header_level(eb, old_root->level);
		btrfs_set_header_generation(eb, old_generation);
	}
	if (tm)
		__tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
	else
		WARN_ON(btrfs_header_level(eb) != 0);
	WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));

	return eb;
}
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
{
	struct tree_mod_elem *tm;
	int level;
	struct extent_buffer *eb_root = btrfs_root_node(root);

	tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
	if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
		level = tm->old_root.level;
	} else {
		level = btrfs_header_level(eb_root);
	}
	free_extent_buffer(eb_root);

	return level;
}
static inline int should_cow_block(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct extent_buffer *buf)
{
	if (btrfs_test_is_dummy_root(root))
		return 0;

	/* ensure we can see the force_cow */
	smp_rmb();

	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 *    when we create snapshot during committing the transaction,
	 *    after we've finished copying src root, we must COW the shared
	 *    block to ensure the metadata consistency.
	 */
	if (btrfs_header_generation(buf) == trans->transid &&
	    !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
	    !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
	      btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
	    !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
		return 0;
	return 1;
}
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't COWed more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, struct extent_buffer *buf,
		    struct extent_buffer *parent, int parent_slot,
		    struct extent_buffer **cow_ret)
{
	u64 search_start;
	int ret;

	if (trans->transaction != root->fs_info->running_transaction)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid,
		       root->fs_info->running_transaction->transid);

	if (trans->transid != root->fs_info->generation)
		WARN(1, KERN_CRIT "trans %llu running %llu\n",
		       trans->transid, root->fs_info->generation);

	if (!should_cow_block(trans, root, buf)) {
		*cow_ret = buf;
		return 0;
	}

	search_start = buf->start & ~((u64)SZ_1G - 1);

	if (parent)
		btrfs_set_lock_blocking(parent);
	btrfs_set_lock_blocking(buf);

	ret = __btrfs_cow_block(trans, root, buf, parent,
				 parent_slot, cow_ret, search_start, 0);

	trace_btrfs_cow_block(root, buf, *cow_ret);

	return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
	if (blocknr < other && other - (blocknr + blocksize) < 32768)
		return 1;
	if (blocknr > other && blocknr - (other + blocksize) < 32768)
		return 1;
	return 0;
}
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
	struct btrfs_key k1;

	btrfs_disk_key_to_cpu(&k1, disk);

	return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
/*
 * this is used by the defrag code to go through all the
 * leaves pointed to by a node and reallocate them so that
 * disk order is close to key order
 */
int btrfs_realloc_node(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct extent_buffer *parent,
		       int start_slot, u64 *last_ret,
		       struct btrfs_key *progress)
{
	struct extent_buffer *cur;
	u64 blocknr, gen, other, last_block = 0;
	u64 search_start = *last_ret;
	u32 parent_nritems, blocksize;
	int i, err = 0, end_slot, parent_level, uptodate;
	int progress_passed = 0;
	struct btrfs_disk_key disk_key;

	parent_level = btrfs_header_level(parent);

	WARN_ON(trans->transaction != root->fs_info->running_transaction);
	WARN_ON(trans->transid != root->fs_info->generation);

	parent_nritems = btrfs_header_nritems(parent);
	blocksize = root->nodesize;
	end_slot = parent_nritems - 1;

	if (parent_nritems <= 1)
		return 0;

	btrfs_set_lock_blocking(parent);

	for (i = start_slot; i <= end_slot; i++) {
		int close = 1;

		btrfs_node_key(parent, &disk_key, i);
		if (!progress_passed && comp_keys(&disk_key, progress) < 0)
			continue;

		progress_passed = 1;
		blocknr = btrfs_node_blockptr(parent, i);
		gen = btrfs_node_ptr_generation(parent, i);
		if (last_block == 0)
			last_block = blocknr;

		if (i > 0) {
			other = btrfs_node_blockptr(parent, i - 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (!close && i < end_slot) {
			other = btrfs_node_blockptr(parent, i + 1);
			close = close_blocks(blocknr, other, blocksize);
		}
		if (close) {
			last_block = blocknr;
			continue;
		}

		cur = btrfs_find_tree_block(root->fs_info, blocknr);
		if (cur)
			uptodate = btrfs_buffer_uptodate(cur, gen, 0);
		else
			uptodate = 0;
		if (!cur || !uptodate) {
			if (!cur) {
				cur = read_tree_block(root, blocknr, gen);
				if (IS_ERR(cur)) {
					return PTR_ERR(cur);
				} else if (!extent_buffer_uptodate(cur)) {
					free_extent_buffer(cur);
					return -EIO;
				}
			} else if (!uptodate) {
				err = btrfs_read_buffer(cur, gen);
				if (err) {
					free_extent_buffer(cur);
					return err;
				}
			}
		}
		if (search_start == 0)
			search_start = last_block;

		btrfs_tree_lock(cur);
		btrfs_set_lock_blocking(cur);
		err = __btrfs_cow_block(trans, root, cur, parent, i,
					&cur, search_start,
					min(16 * blocksize,
					    (end_slot - i) * blocksize));
		if (err) {
			btrfs_tree_unlock(cur);
			free_extent_buffer(cur);
			break;
		}
		search_start = cur->start;
		last_block = cur->start;
		*last_ret = search_start;
		btrfs_tree_unlock(cur);
		free_extent_buffer(cur);
	}
	return err;
}
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);

	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
				       unsigned long p,
				       int item_size, struct btrfs_key *key,
				       int max, int *slot)
{
	int low = 0;
	int high = max;
	int mid;
	int ret;
	struct btrfs_disk_key *tmp = NULL;
	struct btrfs_disk_key unaligned;
	unsigned long offset;
	char *kaddr = NULL;
	unsigned long map_start = 0;
	unsigned long map_len = 0;
	int err;

	while (low < high) {
		mid = (low + high) / 2;
		offset = p + mid * item_size;

		if (!kaddr || offset < map_start ||
		    (offset + sizeof(struct btrfs_disk_key)) >
		    map_start + map_len) {

			err = map_private_extent_buffer(eb, offset,
						sizeof(struct btrfs_disk_key),
						&kaddr, &map_start, &map_len);
			if (!err) {
				tmp = (struct btrfs_disk_key *)(kaddr + offset -
								map_start);
			} else {
				read_extent_buffer(eb, &unaligned,
						   offset, sizeof(unaligned));
				tmp = &unaligned;
			}
		} else {
			tmp = (struct btrfs_disk_key *)(kaddr + offset -
							map_start);
		}
		ret = comp_keys(tmp, key);

		if (ret < 0)
			low = mid + 1;
		else if (ret > 0)
			high = mid;
		else {
			*slot = mid;
			return 0;
		}
	}
	*slot = low;
	return 1;
}
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		      int level, int *slot)
{
	if (level == 0)
		return generic_bin_search(eb,
					  offsetof(struct btrfs_leaf, items),
					  sizeof(struct btrfs_item),
					  key, btrfs_header_nritems(eb),
					  slot);
	else
		return generic_bin_search(eb,
					  offsetof(struct btrfs_node, ptrs),
					  sizeof(struct btrfs_key_ptr),
					  key, btrfs_header_nritems(eb),
					  slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
		     int level, int *slot)
{
	return bin_search(eb, key, level, slot);
}
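
/* adjust the bytes-used accounting kept in the root item */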
static void root_add_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) + size);
	spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
	spin_lock(&root->accounting_lock);
	btrfs_set_root_used(&root->root_item,
			    btrfs_root_used(&root->root_item) - size);
	spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
				   struct extent_buffer *parent, int slot)
{
	int level = btrfs_header_level(parent);
	struct extent_buffer *eb;

	if (slot >= btrfs_header_nritems(parent))
		return NULL;

	BUG_ON(level == 0);

	eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
			     btrfs_node_ptr_generation(parent, slot));
	if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
		if (!IS_ERR(eb))
			free_extent_buffer(eb);
		eb = NULL;
	}

	return eb;
}
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
static noinline int balance_level(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];
	u64 orig_ptr;

	if (level == 0)
		return 0;

	mid = path->nodes[level];

	WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
		path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	orig_ptr = btrfs_node_blockptr(mid, orig_slot);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	/*
	 * deal with the case where there is only one pointer in the root
	 * by promoting the node below to a root
	 */
	if (!parent) {
		struct extent_buffer *child;

		if (btrfs_header_nritems(mid) != 1)
			return 0;

		/* promote the child to a root */
		child = read_node_slot(root, mid, 0);
		if (!child) {
			ret = -EROFS;
			btrfs_handle_fs_error(root->fs_info, ret, NULL);
			goto enospc;
		}

		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
		ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
		if (ret) {
			btrfs_tree_unlock(child);
			free_extent_buffer(child);
			goto enospc;
		}

		tree_mod_log_set_root_pointer(root, child, 1);
		rcu_assign_pointer(root->node, child);

		add_root_to_dirty_list(root);
		btrfs_tree_unlock(child);

		path->locks[level] = 0;
		path->nodes[level] = NULL;
		clean_tree_block(trans, root->fs_info, mid);
		btrfs_tree_unlock(mid);
		/* once for the path */
		free_extent_buffer(mid);

		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		/* once for the root ptr */
		free_extent_buffer_stale(mid);
		return 0;
	}
	if (btrfs_header_nritems(mid) >
	    BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
		return 0;

	left = read_node_slot(root, parent, pslot - 1);
	if (left) {
		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);
		wret = btrfs_cow_block(trans, root, left,
				       parent, pslot - 1, &left);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}
	right = read_node_slot(root, parent, pslot + 1);
	if (right) {
		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);
		wret = btrfs_cow_block(trans, root, right,
				       parent, pslot + 1, &right);
		if (wret) {
			ret = wret;
			goto enospc;
		}
	}

	/* first, try to make some room in the middle buffer */
	if (left) {
		orig_slot += btrfs_header_nritems(left);
		wret = push_node_left(trans, root, left, mid, 1);
		if (wret < 0)
			ret = wret;
	}

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		wret = push_node_left(trans, root, mid, right, 1);
		if (wret < 0 && wret != -ENOSPC)
			ret = wret;
		if (btrfs_header_nritems(right) == 0) {
			clean_tree_block(trans, root->fs_info, right);
			btrfs_tree_unlock(right);
			del_ptr(root, path, level + 1, pslot + 1);
			root_sub_used(root, right->len);
			btrfs_free_tree_block(trans, root, right, 0, 1);
			free_extent_buffer_stale(right);
			right = NULL;
		} else {
			struct btrfs_disk_key right_key;
			btrfs_node_key(right, &right_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &right_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);
		}
	}
	if (btrfs_header_nritems(mid) == 1) {
		/*
		 * we're not allowed to leave a node with one item in the
		 * tree during a delete.  A deletion from lower in the tree
		 * could try to delete the only pointer in this node.
		 * So, pull some keys from the left.
		 * There has to be a left pointer at this point because
		 * otherwise we would have pulled some pointers from the
		 * right
		 */
		if (!left) {
			ret = -EROFS;
			btrfs_handle_fs_error(root->fs_info, ret, NULL);
			goto enospc;
		}
		wret = balance_node_right(trans, root, mid, left);
		if (wret < 0) {
			ret = wret;
			goto enospc;
		}
		if (wret == 1) {
			wret = push_node_left(trans, root, left, mid, 1);
			if (wret < 0)
				ret = wret;
		}
		BUG_ON(wret == 1);
	}
	if (btrfs_header_nritems(mid) == 0) {
		clean_tree_block(trans, root->fs_info, mid);
		btrfs_tree_unlock(mid);
		del_ptr(root, path, level + 1, pslot);
		root_sub_used(root, mid->len);
		btrfs_free_tree_block(trans, root, mid, 0, 1);
		free_extent_buffer_stale(mid);
		mid = NULL;
	} else {
		/* update the parent key to reflect our changes */
		struct btrfs_disk_key mid_key;
		btrfs_node_key(mid, &mid_key, 0);
		tree_mod_log_set_node_key(root->fs_info, parent,
					  pslot, 0);
		btrfs_set_node_key(parent, &mid_key, pslot);
		btrfs_mark_buffer_dirty(parent);
	}

	/* update the path */
	if (left) {
		if (btrfs_header_nritems(left) > orig_slot) {
			extent_buffer_get(left);
			/* left was locked after cow */
			path->nodes[level] = left;
			path->slots[level + 1] -= 1;
			path->slots[level] = orig_slot;
			if (mid) {
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			}
		} else {
			orig_slot -= btrfs_header_nritems(left);
			path->slots[level] = orig_slot;
		}
	}
	/* double check we haven't messed things up */
	if (orig_ptr !=
	    btrfs_node_blockptr(path->nodes[level], path->slots[level]))
		BUG();
enospc:
	if (right) {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	if (left) {
		if (path->nodes[level] != left)
			btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	return ret;
}
/* Node balancing for insertion.  Here we only split or push nodes around
 * when they are completely full.  This is also done top down, so we
 * have to be pessimistic.
 */
static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, int level)
{
	struct extent_buffer *right = NULL;
	struct extent_buffer *mid;
	struct extent_buffer *left = NULL;
	struct extent_buffer *parent = NULL;
	int ret = 0;
	int wret;
	int pslot;
	int orig_slot = path->slots[level];

	if (level == 0)
		return 1;

	mid = path->nodes[level];
	WARN_ON(btrfs_header_generation(mid) != trans->transid);

	if (level < BTRFS_MAX_LEVEL - 1) {
		parent = path->nodes[level + 1];
		pslot = path->slots[level + 1];
	}

	if (!parent)
		return 1;

	left = read_node_slot(root, parent, pslot - 1);

	/* first, try to make some room in the middle buffer */
	if (left) {
		u32 left_nr;

		btrfs_tree_lock(left);
		btrfs_set_lock_blocking(left);

		left_nr = btrfs_header_nritems(left);
		if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, left, parent,
					      pslot - 1, &left);
			if (ret)
				wret = 1;
			else
				wret = push_node_left(trans, root,
						      left, mid, 0);
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;
			orig_slot += left_nr;
			btrfs_node_key(mid, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot, 0);
			btrfs_set_node_key(parent, &disk_key, pslot);
			btrfs_mark_buffer_dirty(parent);
			if (btrfs_header_nritems(left) > orig_slot) {
				path->nodes[level] = left;
				path->slots[level + 1] -= 1;
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				orig_slot -=
					btrfs_header_nritems(left);
				path->slots[level] = orig_slot;
				btrfs_tree_unlock(left);
				free_extent_buffer(left);
			}
			return 0;
		}
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
	}
	right = read_node_slot(root, parent, pslot + 1);

	/*
	 * then try to empty the right most buffer into the middle
	 */
	if (right) {
		u32 right_nr;

		btrfs_tree_lock(right);
		btrfs_set_lock_blocking(right);

		right_nr = btrfs_header_nritems(right);
		if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
			wret = 1;
		} else {
			ret = btrfs_cow_block(trans, root, right,
					      parent, pslot + 1,
					      &right);
			if (ret)
				wret = 1;
			else
				wret = balance_node_right(trans, root,
							  right, mid);
		}
		if (wret < 0)
			ret = wret;
		if (wret == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_node_key(right, &disk_key, 0);
			tree_mod_log_set_node_key(root->fs_info, parent,
						  pslot + 1, 0);
			btrfs_set_node_key(parent, &disk_key, pslot + 1);
			btrfs_mark_buffer_dirty(parent);

			if (btrfs_header_nritems(mid) <= orig_slot) {
				path->nodes[level] = right;
				path->slots[level + 1] += 1;
				path->slots[level] = orig_slot -
					btrfs_header_nritems(mid);
				btrfs_tree_unlock(mid);
				free_extent_buffer(mid);
			} else {
				btrfs_tree_unlock(right);
				free_extent_buffer(right);
			}
			return 0;
		}
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}
	return 1;
}
/*
 * readahead one full node of leaves, finding things that are close
 * to the block in 'slot', and triggering ra on them.
 */
static void reada_for_search(struct btrfs_root *root,
			     struct btrfs_path *path,
			     int level, int slot, u64 objectid)
{
	struct extent_buffer *node;
	struct btrfs_disk_key disk_key;
	u32 nritems;
	u64 search;
	u64 target;
	u64 nread = 0;
	u64 gen;
	struct extent_buffer *eb;
	u32 nr;
	u32 blocksize;
	u32 nscan = 0;

	if (!path->nodes[level])
		return;

	node = path->nodes[level];

	search = btrfs_node_blockptr(node, slot);
	blocksize = root->nodesize;
	eb = btrfs_find_tree_block(root->fs_info, search);
	if (eb) {
		free_extent_buffer(eb);
		return;
	}

	target = search;

	nritems = btrfs_header_nritems(node);
	nr = slot;

	while (1) {
		if (path->reada == READA_BACK) {
			if (nr == 0)
				break;
			nr--;
		} else if (path->reada == READA_FORWARD) {
			nr++;
			if (nr >= nritems)
				break;
		}
		if (path->reada == READA_BACK && objectid) {
			btrfs_node_key(node, &disk_key, nr);
			if (btrfs_disk_key_objectid(&disk_key) != objectid)
				break;
		}
		search = btrfs_node_blockptr(node, nr);
		if ((search <= target && target - search <= 65536) ||
		    (search > target && search - target <= 65536)) {
			gen = btrfs_node_ptr_generation(node, nr);
			readahead_tree_block(root, search);
			nread += blocksize;
		}
		nscan++;
		if ((nread > 65536 || nscan > 32))
			break;
	}
}
static noinline void reada_for_balance(struct btrfs_root *root,
				       struct btrfs_path *path, int level)
{
	int slot;
	int nritems;
	struct extent_buffer *parent;
	struct extent_buffer *eb;
	u64 gen;
	u64 block1 = 0;
	u64 block2 = 0;

	parent = path->nodes[level + 1];
	if (!parent)
		return;

	nritems = btrfs_header_nritems(parent);
	slot = path->slots[level + 1];

	if (slot > 0) {
		block1 = btrfs_node_blockptr(parent, slot - 1);
		gen = btrfs_node_ptr_generation(parent, slot - 1);
		eb = btrfs_find_tree_block(root->fs_info, block1);
		/*
		 * if we get -eagain from btrfs_buffer_uptodate, we
		 * don't want to return eagain here.  That will loop
		 * forever
		 */
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block1 = 0;
		free_extent_buffer(eb);
	}
	if (slot + 1 < nritems) {
		block2 = btrfs_node_blockptr(parent, slot + 1);
		gen = btrfs_node_ptr_generation(parent, slot + 1);
		eb = btrfs_find_tree_block(root->fs_info, block2);
		if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
			block2 = 0;
		free_extent_buffer(eb);
	}

	if (block1)
		readahead_tree_block(root, block1);
	if (block2)
		readahead_tree_block(root, block2);
}
/*
 * when we walk down the tree, it is usually safe to unlock the higher layers
 * in the tree.  The exceptions are when our path goes through slot 0, because
 * operations on the tree might require changing key pointers higher up in the
 * tree.
 *
 * callers might also have set path->keep_locks, which tells this code to keep
 * the lock if the path points to the last slot in the block.  This is part of
 * walking through the tree, and selecting the next slot in the higher block.
 *
 * lowest_unlock sets the lowest level in the tree we're allowed to unlock.  so
 * if lowest_unlock is 1, level 0 won't be unlocked
 */
static noinline void unlock_up(struct btrfs_path *path, int level,
			       int lowest_unlock, int min_write_lock_level,
			       int *write_lock_level)
{
	int i;
	int skip_level = level;
	int no_skips = 0;
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
			break;
		if (!path->locks[i])
			break;
		if (!no_skips && path->slots[i] == 0) {
			skip_level = i + 1;
			continue;
		}
		if (!no_skips && path->keep_locks) {
			u32 nritems;
			t = path->nodes[i];
			nritems = btrfs_header_nritems(t);
			if (nritems < 1 || path->slots[i] >= nritems - 1) {
				skip_level = i + 1;
				continue;
			}
		}
		if (skip_level < i && i >= lowest_unlock)
			no_skips = 1;

		t = path->nodes[i];
		if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
			btrfs_tree_unlock_rw(t, path->locks[i]);
			path->locks[i] = 0;
			if (write_lock_level &&
			    i > min_write_lock_level &&
			    i <= *write_lock_level) {
				*write_lock_level = i - 1;
			}
		}
	}
}
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node. This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
	if (path->keep_locks)
	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		if (!path->nodes[i])
		if (!path->locks[i])
		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
/*
 * helper function for btrfs_search_slot. The goal is to find a block
 * in cache without setting the path to blocking. If we find the block
 * we return zero and the path is unchanged.
 *
 * If we can't find the block, we set the path blocking and do some
 * reada. -EAGAIN is returned and the search must be repeated.
 */
read_block_for_search(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, struct btrfs_path *p,
		      struct extent_buffer **eb_ret, int level, int slot,
		      struct btrfs_key *key, u64 time_seq)
	struct extent_buffer *b = *eb_ret;
	struct extent_buffer *tmp;

	blocknr = btrfs_node_blockptr(b, slot);
	gen = btrfs_node_ptr_generation(b, slot);

	tmp = btrfs_find_tree_block(root->fs_info, blocknr);

	/* first we do an atomic uptodate check */
	if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
		/* the pages were up to date, but we failed
		 * the generation number check. Do a full
		 * read for the generation number that is correct.
		 * We must do this without dropping locks so
		 * we can trust our generation number
		 */
		btrfs_set_path_blocking(p);

		/* now we're allowed to do a blocking uptodate check */
		ret = btrfs_read_buffer(tmp, gen);
		free_extent_buffer(tmp);
		btrfs_release_path(p);

	/*
	 * reduce lock contention at high levels
	 * of the btree by dropping locks before
	 * we read. Don't release the lock on the current
	 * level because we need to walk this node to figure
	 * out which blocks to read.
	 */
	btrfs_unlock_up_safe(p, level + 1);
	btrfs_set_path_blocking(p);

	free_extent_buffer(tmp);
	if (p->reada != READA_NONE)
		reada_for_search(root, p, level, slot, key->objectid);

	btrfs_release_path(p);

	tmp = read_tree_block(root, blocknr, 0);
	/*
	 * If the read above didn't mark this buffer up to date,
	 * it will never end up being up to date. Set ret to EIO now
	 * and give up so that our caller doesn't loop forever
	 */
	if (!btrfs_buffer_uptodate(tmp, 0, 0))
	free_extent_buffer(tmp);
/*
 * helper function for btrfs_search_slot. This does all of the checks
 * for node-level blocks and does any balancing required based on
 *
 * If no extra work was required, zero is returned. If we had to
 * drop the path, -EAGAIN is returned and btrfs_search_slot must
 */
setup_nodes_for_search(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *p,
		       struct extent_buffer *b, int level, int ins_len,
		       int *write_lock_level)
	if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
	    BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);

		btrfs_set_path_blocking(p);
		reada_for_balance(root, p, level);
		sret = split_node(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		b = p->nodes[level];
	} else if (ins_len < 0 && btrfs_header_nritems(b) <
		   BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
		if (*write_lock_level < level + 1) {
			*write_lock_level = level + 1;
			btrfs_release_path(p);

		btrfs_set_path_blocking(p);
		reada_for_balance(root, p, level);
		sret = balance_level(trans, root, p, level);
		btrfs_clear_path_blocking(p, NULL, 0);

		b = p->nodes[level];
			btrfs_release_path(p);
		BUG_ON(btrfs_header_nritems(b) == 1);
static void key_search_validate(struct extent_buffer *b,
				struct btrfs_key *key,
				int level)
#ifdef CONFIG_BTRFS_ASSERT
	struct btrfs_disk_key disk_key;

	btrfs_cpu_key_to_disk(&disk_key, key);

		ASSERT(!memcmp_extent_buffer(b, &disk_key,
			offsetof(struct btrfs_leaf, items[0].key),
		ASSERT(!memcmp_extent_buffer(b, &disk_key,
			offsetof(struct btrfs_node, ptrs[0].key),

static int key_search(struct extent_buffer *b, struct btrfs_key *key,
		      int level, int *prev_cmp, int *slot)
	if (*prev_cmp != 0) {
		*prev_cmp = bin_search(b, key, level, slot);

	key_search_validate(b, key, level);
int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
		    u64 iobjectid, u64 ioff, u8 key_type,
		    struct btrfs_key *found_key)
	struct btrfs_key key;
	struct extent_buffer *eb;

	key.type = key_type;
	key.objectid = iobjectid;

	ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);

	eb = path->nodes[0];
	if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
		ret = btrfs_next_leaf(fs_root, path);
		eb = path->nodes[0];

	btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
	if (found_key->type != key.type ||
	    found_key->objectid != key.objectid)
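/*
 * Illustrative sketch (not in the original source): the typical calling
 * convention for btrfs_find_item() above.  The inode number and the choice
 * of BTRFS_INODE_ITEM_KEY are hypothetical; the point is only that the
 * caller owns the path and must free it afterwards.
 */
static int __maybe_unused example_find_inode_item(struct btrfs_root *root, u64 ino)
{
	struct btrfs_path *path;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_find_item(root, path, ino, 0, BTRFS_INODE_ITEM_KEY,
			      &found_key);
	if (ret == 0) {
		/* path->nodes[0] / path->slots[0] now point at the item */
	}

	btrfs_free_path(path);
	return ret;
}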
/*
 * look for key in the tree. path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned. If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
 */
int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *key, struct btrfs_path *p, int
		      ins_len, int cow)
	struct extent_buffer *b;
	int lowest_unlock = 1;
	/* everything at write_lock_level or lower must be write locked */
	int write_lock_level = 0;
	u8 lowest_level = 0;
	int min_write_lock_level;

	lowest_level = p->lowest_level;
	WARN_ON(lowest_level && ins_len > 0);
	WARN_ON(p->nodes[0] != NULL);
	BUG_ON(!cow && ins_len);

		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * for those levels as well
		 */
		write_lock_level = 2;
	} else if (ins_len > 0) {
		/*
		 * for inserting items, make sure we have a write lock on
		 * level 1 so we can update keys
		 */
		write_lock_level = 1;

		write_lock_level = -1;

	if (cow && (p->keep_locks || p->lowest_level))
		write_lock_level = BTRFS_MAX_LEVEL;

	min_write_lock_level = write_lock_level;

	/*
	 * we try very hard to do read locks on the root
	 */
	root_lock = BTRFS_READ_LOCK;

	if (p->search_commit_root) {
		/*
		 * the commit roots are read only
		 * so we always do read locks
		 */
		if (p->need_commit_sem)
			down_read(&root->fs_info->commit_root_sem);
		b = root->commit_root;
		extent_buffer_get(b);
		level = btrfs_header_level(b);
		if (p->need_commit_sem)
			up_read(&root->fs_info->commit_root_sem);
		if (!p->skip_locking)
			btrfs_tree_read_lock(b);

	if (p->skip_locking) {
		b = btrfs_root_node(root);
		level = btrfs_header_level(b);

		/* we don't know the level of the root node
		 * until we actually have it read locked
		 */
		b = btrfs_read_lock_root_node(root);
		level = btrfs_header_level(b);
		if (level <= write_lock_level) {
			/* whoops, must trade for write lock */
			btrfs_tree_read_unlock(b);
			free_extent_buffer(b);
			b = btrfs_lock_root_node(root);
			root_lock = BTRFS_WRITE_LOCK;

			/* the level might have changed, check again */
			level = btrfs_header_level(b);

	p->nodes[level] = b;
	if (!p->skip_locking)
		p->locks[level] = root_lock;

		level = btrfs_header_level(b);

		/*
		 * setup the path here so we can release it under lock
		 * contention with the cow code
		 */
			/*
			 * if we don't really need to cow this block
			 * then we don't want to set the path blocking,
			 * so we test it here
			 */
			if (!should_cow_block(trans, root, b))

			/*
			 * must have write locks on this node and the
			 */
			if (level > write_lock_level ||
			    (level + 1 > write_lock_level &&
			     level + 1 < BTRFS_MAX_LEVEL &&
			     p->nodes[level + 1])) {
				write_lock_level = level + 1;
				btrfs_release_path(p);

			btrfs_set_path_blocking(p);
			err = btrfs_cow_block(trans, root, b,
					      p->nodes[level + 1],
					      p->slots[level + 1], &b);

		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 *
		 * If we're inserting or deleting (ins_len != 0), then we might
		 * be changing slot zero, which may require changing the parent.
		 * So, we can't drop the lock until after we know which slot
		 * we're operating on.
		 */
		if (!ins_len && !p->keep_locks) {
			if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
				btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);

		ret = key_search(b, key, level, &prev_cmp, &slot);

			if (ret && slot > 0) {
			p->slots[level] = slot;
			err = setup_nodes_for_search(trans, root, p, b, level,
						     ins_len, &write_lock_level);

			b = p->nodes[level];
			slot = p->slots[level];

			/*
			 * slot 0 is special, if we change the key
			 * we have to update the parent pointer
			 * which means we must have a write lock
			 */
			if (slot == 0 && ins_len &&
			    write_lock_level < level + 1) {
				write_lock_level = level + 1;
				btrfs_release_path(p);

			unlock_up(p, level, lowest_unlock,
				  min_write_lock_level, &write_lock_level);

			if (level == lowest_level) {

			err = read_block_for_search(trans, root, p,
						    &b, level, slot, key, 0);

			if (!p->skip_locking) {
				level = btrfs_header_level(b);
				if (level <= write_lock_level) {
					err = btrfs_try_tree_write_lock(b);
						btrfs_set_path_blocking(p);
						btrfs_clear_path_blocking(p, b,
					p->locks[level] = BTRFS_WRITE_LOCK;
					err = btrfs_tree_read_lock_atomic(b);
						btrfs_set_path_blocking(p);
						btrfs_tree_read_lock(b);
						btrfs_clear_path_blocking(p, b,
					p->locks[level] = BTRFS_READ_LOCK;
				p->nodes[level] = b;

			p->slots[level] = slot;
			    btrfs_leaf_free_space(root, b) < ins_len) {
				if (write_lock_level < 1) {
					write_lock_level = 1;
					btrfs_release_path(p);

				btrfs_set_path_blocking(p);
				err = split_leaf(trans, root, key,
						 p, ins_len, ret == 0);
				btrfs_clear_path_blocking(p, NULL, 0);

			if (!p->search_for_split)
				unlock_up(p, level, lowest_unlock,
					  min_write_lock_level, &write_lock_level);

	/*
	 * we don't really know what they plan on doing with the path
	 * from here on, so for now just mark it as blocking
	 */
	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
	if (ret < 0 && !p->skip_release_on_error)
		btrfs_release_path(p);
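/*
 * Illustrative sketch (not part of the original file): the common read-only
 * calling pattern for btrfs_search_slot() above.  With ins_len == 0 and
 * cow == 0 no transaction handle is needed; a return of 1 means "not found,
 * the path points at the insertion slot".  The key values are hypothetical.
 */
static int __maybe_unused example_lookup(struct btrfs_root *root,
					 u64 objectid, u8 type, u64 offset)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret == 0) {
		/* exact match: item is at path->nodes[0], path->slots[0] */
	} else if (ret == 1) {
		/* not found: slot marks where the key would be inserted */
	}

	btrfs_free_path(path);
	return ret;
}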
/*
 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
 * current state of the tree together with the operations recorded in the tree
 * modification log to search for the key in a previous version of this tree, as
 * denoted by the time_seq parameter.
 *
 * Naturally, there is no support for insert, delete or cow operations.
 *
 * The resulting path and return value will be set up as if we called
 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
 */
int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
			  struct btrfs_path *p, u64 time_seq)
	struct extent_buffer *b;
	int lowest_unlock = 1;
	u8 lowest_level = 0;

	lowest_level = p->lowest_level;
	WARN_ON(p->nodes[0] != NULL);

	if (p->search_commit_root) {
		return btrfs_search_slot(NULL, root, key, p, 0, 0);

	b = get_old_root(root, time_seq);
	level = btrfs_header_level(b);
	p->locks[level] = BTRFS_READ_LOCK;

		level = btrfs_header_level(b);
		p->nodes[level] = b;
		btrfs_clear_path_blocking(p, NULL, 0);

		/*
		 * we have a lock on b and as long as we aren't changing
		 * the tree, there is no way for the items in b to change.
		 * It is safe to drop the lock on our parent before we
		 * go through the expensive btree search on b.
		 */
		btrfs_unlock_up_safe(p, level + 1);

		/*
		 * Since we can unwind ebs we want to do a real search every
		 */
		ret = key_search(b, key, level, &prev_cmp, &slot);

			if (ret && slot > 0) {
			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);

			if (level == lowest_level) {

			err = read_block_for_search(NULL, root, p, &b, level,
						    slot, key, time_seq);

			level = btrfs_header_level(b);
			err = btrfs_tree_read_lock_atomic(b);
				btrfs_set_path_blocking(p);
				btrfs_tree_read_lock(b);
				btrfs_clear_path_blocking(p, b,
			b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);

			p->locks[level] = BTRFS_READ_LOCK;
			p->nodes[level] = b;

			p->slots[level] = slot;
			unlock_up(p, level, lowest_unlock, 0, NULL);

	if (!p->leave_spinning)
		btrfs_set_path_blocking(p);
		btrfs_release_path(p);
/*
 * helper to use instead of search slot if no exact match is needed but
 * instead the next or previous item should be returned.
 * When find_higher is true, the next higher item is returned, the next lower
 * When return_any and find_higher are both true, and no higher item is found,
 * return the next lower instead.
 * When return_any is true and find_higher is false, and no lower item is found,
 * return the next higher instead.
 * It returns 0 if any item is found, 1 if none is found (tree empty), and
 */
int btrfs_search_slot_for_read(struct btrfs_root *root,
			       struct btrfs_key *key, struct btrfs_path *p,
			       int find_higher, int return_any)
	struct extent_buffer *leaf;

	ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
	/*
	 * a return value of 1 means the path is at the position where the
	 * item should be inserted. Normally this is the next bigger item,
	 * but in case the previous item is the last in a leaf, path points
	 * to the first free slot in the previous leaf, i.e. at an invalid
	 */
		if (p->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, p);
				/*
				 * no higher item found, return the next
				 */
				btrfs_release_path(p);
		if (p->slots[0] == 0) {
			ret = btrfs_prev_leaf(root, p);
				if (p->slots[0] == btrfs_header_nritems(leaf))
				/*
				 * no lower item found, return the next
				 */
				btrfs_release_path(p);
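/*
 * Illustrative sketch (not in the original source): using
 * btrfs_search_slot_for_read() above to land on the first item at or after a
 * given key.  find_higher = 1 asks for the next higher item on a miss;
 * return_any = 0 means "fail rather than fall back to a lower item".
 */
static int __maybe_unused example_find_first_at_or_after(struct btrfs_root *root,
							  struct btrfs_key *key,
							  struct btrfs_key *found)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot_for_read(root, key, path, 1, 0);
	if (ret == 0)
		btrfs_item_key_to_cpu(path->nodes[0], found, path->slots[0]);

	btrfs_free_path(path);
	return ret;
}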
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 */
static void fixup_low_keys(struct btrfs_fs_info *fs_info,
			   struct btrfs_path *path,
			   struct btrfs_disk_key *key, int level)
	struct extent_buffer *t;

	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
		int tslot = path->slots[i];
		if (!path->nodes[i])
		tree_mod_log_set_node_key(fs_info, t, tslot, 1);
		btrfs_set_node_key(t, key, tslot);
		btrfs_mark_buffer_dirty(path->nodes[i]);
/*
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
			     struct btrfs_path *path,
			     struct btrfs_key *new_key)
	struct btrfs_disk_key disk_key;
	struct extent_buffer *eb;

	eb = path->nodes[0];
	slot = path->slots[0];
		btrfs_item_key(eb, &disk_key, slot - 1);
		BUG_ON(comp_keys(&disk_key, new_key) >= 0);
	if (slot < btrfs_header_nritems(eb) - 1) {
		btrfs_item_key(eb, &disk_key, slot + 1);
		BUG_ON(comp_keys(&disk_key, new_key) <= 0);

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(eb, &disk_key, slot);
	btrfs_mark_buffer_dirty(eb);
		fixup_low_keys(fs_info, path, &disk_key, 1);
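/*
 * Illustrative sketch (not part of the original file): a typical use of
 * btrfs_set_item_key_safe() above.  After trimming 'shift' bytes off the
 * front of an item whose key offset is a logical file offset, the key has to
 * move forward by the same amount.  The path is assumed to already point at
 * the item; everything else here is hypothetical.
 */
static void __maybe_unused example_bump_key_offset(struct btrfs_fs_info *fs_info,
						   struct btrfs_path *path,
						   u64 shift)
{
	struct btrfs_key key;

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
	key.offset += shift;

	/* only legal because the new key still sorts between its neighbours */
	btrfs_set_item_key_safe(fs_info, path, &key);
}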
/*
 * try to push data from one node into the next node left in the
 *
 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
 * error, and > 0 if there was no room in the left hand block.
 */
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty)
	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	if (!empty && src_nritems <= 8)
	if (push_items <= 0)

	push_items = min(src_nritems, push_items);
	if (push_items < src_nritems) {
		/* leave at least 8 pointers in the node if
		 * we aren't going to empty it
		 */
		if (src_nritems - push_items < 8) {
			if (push_items <= 8)
			push_items = min(src_nritems - 8, push_items);

	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
		btrfs_abort_transaction(trans, root, ret);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(dst_nritems),
			   btrfs_node_key_ptr_offset(0),
			   push_items * sizeof(struct btrfs_key_ptr));

	if (push_items < src_nritems) {
		/*
		 * don't call tree_mod_log_eb_move here, key removal was already
		 * fully logged by tree_mod_log_eb_copy above.
		 */
		memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
				      btrfs_node_key_ptr_offset(push_items),
				      (src_nritems - push_items) *
				      sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);
	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);
/*
 * try to push data from one node into the next node right in the
 *
 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
 * error, and > 0 if there was no room in the right hand block.
 *
 * this will only push up to 1/2 the contents of the left node over
 */
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst,
			      struct extent_buffer *src)
	WARN_ON(btrfs_header_generation(src) != trans->transid);
	WARN_ON(btrfs_header_generation(dst) != trans->transid);

	src_nritems = btrfs_header_nritems(src);
	dst_nritems = btrfs_header_nritems(dst);
	push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
	if (push_items <= 0)
	if (src_nritems < 4)

	max_push = src_nritems / 2 + 1;
	/* don't try to empty the node */
	if (max_push >= src_nritems)
	if (max_push < push_items)
		push_items = max_push;

	tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
	memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
			      btrfs_node_key_ptr_offset(0),
			      sizeof(struct btrfs_key_ptr));

	ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
				   src_nritems - push_items, push_items);
		btrfs_abort_transaction(trans, root, ret);

	copy_extent_buffer(dst, src,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(src_nritems - push_items),
			   push_items * sizeof(struct btrfs_key_ptr));

	btrfs_set_header_nritems(src, src_nritems - push_items);
	btrfs_set_header_nritems(dst, dst_nritems + push_items);

	btrfs_mark_buffer_dirty(src);
	btrfs_mark_buffer_dirty(dst);
/*
 * helper function to insert a new root level in the tree.
 * A new node is allocated, and a single item is inserted to
 * point to the existing root
 *
 * returns zero on success or < 0 on failure.
 */
static noinline int insert_new_root(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path, int level)
	struct extent_buffer *lower;
	struct extent_buffer *c;
	struct extent_buffer *old;
	struct btrfs_disk_key lower_key;

	BUG_ON(path->nodes[level]);
	BUG_ON(path->nodes[level-1] != root->node);

	lower = path->nodes[level-1];
		btrfs_item_key(lower, &lower_key, 0);
		btrfs_node_key(lower, &lower_key, 0);

	c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
				   &lower_key, level, root->node->start, 0);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_nritems(c, 1);
	btrfs_set_header_level(c, level);
	btrfs_set_header_bytenr(c, c->start);
	btrfs_set_header_generation(c, trans->transid);
	btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(c, root->root_key.objectid);

	write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
	write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);

	btrfs_set_node_key(c, &lower_key, 0);
	btrfs_set_node_blockptr(c, 0, lower->start);
	lower_gen = btrfs_header_generation(lower);
	WARN_ON(lower_gen != trans->transid);

	btrfs_set_node_ptr_generation(c, 0, lower_gen);

	btrfs_mark_buffer_dirty(c);

	tree_mod_log_set_root_pointer(root, c, 0);
	rcu_assign_pointer(root->node, c);

	/* the super has an extra ref to root->node */
	free_extent_buffer(old);

	add_root_to_dirty_list(root);
	extent_buffer_get(c);
	path->nodes[level] = c;
	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
	path->slots[level] = 0;
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 */
static void insert_ptr(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       struct btrfs_disk_key *key, u64 bytenr,
		       int slot, int level)
	struct extent_buffer *lower;

	BUG_ON(!path->nodes[level]);
	btrfs_assert_tree_locked(path->nodes[level]);
	lower = path->nodes[level];
	nritems = btrfs_header_nritems(lower);
	BUG_ON(slot > nritems);
	BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
	if (slot != nritems) {
			tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
					     slot, nritems - slot);
		memmove_extent_buffer(lower,
				      btrfs_node_key_ptr_offset(slot + 1),
				      btrfs_node_key_ptr_offset(slot),
				      (nritems - slot) * sizeof(struct btrfs_key_ptr));

		ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
					      MOD_LOG_KEY_ADD, GFP_NOFS);

	btrfs_set_node_key(lower, key, slot);
	btrfs_set_node_blockptr(lower, slot, bytenr);
	WARN_ON(trans->transid == 0);
	btrfs_set_node_ptr_generation(lower, slot, trans->transid);
	btrfs_set_header_nritems(lower, nritems + 1);
	btrfs_mark_buffer_dirty(lower);
/*
 * split the node at the specified level in path in two.
 * The path is corrected to point to the appropriate node after the split
 *
 * Before splitting this tries to make some room in the node by pushing
 * left and right, if either one works, it returns right away.
 *
 * returns 0 on success and < 0 on failure
 */
static noinline int split_node(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path, int level)
	struct extent_buffer *c;
	struct extent_buffer *split;
	struct btrfs_disk_key disk_key;

	c = path->nodes[level];
	WARN_ON(btrfs_header_generation(c) != trans->transid);
	if (c == root->node) {
		/*
		 * trying to split the root, lets make a new one
		 *
		 * tree mod log: We don't log_removal old root in
		 * insert_new_root, because that root buffer will be kept as a
		 * normal node. We are going to log removal of half of the
		 * elements below with tree_mod_log_eb_copy. We're holding a
		 * tree lock on the buffer, which is why we cannot race with
		 * other tree_mod_log users.
		 */
		ret = insert_new_root(trans, root, path, level + 1);
		ret = push_nodes_for_insert(trans, root, path, level);
		c = path->nodes[level];
		if (!ret && btrfs_header_nritems(c) <
		    BTRFS_NODEPTRS_PER_BLOCK(root) - 3)

	c_nritems = btrfs_header_nritems(c);
	mid = (c_nritems + 1) / 2;
	btrfs_node_key(c, &disk_key, mid);

	split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
				       &disk_key, level, c->start, 0);
		return PTR_ERR(split);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_level(split, btrfs_header_level(c));
	btrfs_set_header_bytenr(split, split->start);
	btrfs_set_header_generation(split, trans->transid);
	btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(split, root->root_key.objectid);
	write_extent_buffer(split, root->fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);
	write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(split),

	ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
				   mid, c_nritems - mid);
		btrfs_abort_transaction(trans, root, ret);
	copy_extent_buffer(split, c,
			   btrfs_node_key_ptr_offset(0),
			   btrfs_node_key_ptr_offset(mid),
			   (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
	btrfs_set_header_nritems(split, c_nritems - mid);
	btrfs_set_header_nritems(c, mid);

	btrfs_mark_buffer_dirty(c);
	btrfs_mark_buffer_dirty(split);

	insert_ptr(trans, root, path, &disk_key, split->start,
		   path->slots[level + 1] + 1, level + 1);

	if (path->slots[level] >= mid) {
		path->slots[level] -= mid;
		btrfs_tree_unlock(c);
		free_extent_buffer(c);
		path->nodes[level] = split;
		path->slots[level + 1] += 1;
		btrfs_tree_unlock(split);
		free_extent_buffer(split);
/*
 * how many bytes are required to store the items in a leaf. start
 * and nr indicate which items in the leaf to check. This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
	struct btrfs_item *start_item;
	struct btrfs_item *end_item;
	struct btrfs_map_token token;
	int nritems = btrfs_header_nritems(l);
	int end = min(nritems, start + nr) - 1;

	btrfs_init_map_token(&token);
	start_item = btrfs_item_nr(start);
	end_item = btrfs_item_nr(end);
	data_len = btrfs_token_item_offset(l, start_item, &token) +
		   btrfs_token_item_size(l, start_item, &token);
	data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
	data_len += sizeof(struct btrfs_item) * nr;
	WARN_ON(data_len < 0);
/*
 * The space between the end of the leaf items and
 * the start of the leaf data. IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
				   struct extent_buffer *leaf)
	int nritems = btrfs_header_nritems(leaf);

	ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
		btrfs_crit(root->fs_info,
			   "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
			   ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
			   leaf_space_used(leaf, 0, nritems), nritems);
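/*
 * Illustrative sketch (not in the original source): how a caller might use
 * btrfs_leaf_free_space() above to ask whether one new item of 'data_size'
 * bytes would fit in the current leaf.  A real insert also has to account
 * for the struct btrfs_item header, as done here.
 */
static int __maybe_unused example_item_fits(struct btrfs_root *root,
					    struct extent_buffer *leaf,
					    u32 data_size)
{
	u32 needed = data_size + sizeof(struct btrfs_item);

	return btrfs_leaf_free_space(root, leaf) >= needed;
}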
/*
 * min slot controls the lowest index we're willing to push to the
 * right. We'll push up to and including min_slot, but no lower
 */
static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      int data_size, int empty,
				      struct extent_buffer *right,
				      int free_space, u32 left_nritems,
				      u32 min_slot)
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *upper = path->nodes[1];
	struct btrfs_map_token token;
	struct btrfs_disk_key disk_key;
	struct btrfs_item *item;

	btrfs_init_map_token(&token);

	nr = max_t(u32, 1, min_slot);

	if (path->slots[0] >= left_nritems)
		push_space += data_size;

	slot = path->slots[1];
	i = left_nritems - 1;
		item = btrfs_item_nr(i);

		if (!empty && push_items > 0) {
			if (path->slots[0] > i)
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, left);
				if (space + push_space * 2 > free_space)

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(left, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)

		push_space += this_item_size + sizeof(*item);

	if (push_items == 0)

	WARN_ON(!empty && push_items == left_nritems);

	/* push left to right */
	right_nritems = btrfs_header_nritems(right);

	push_space = btrfs_item_end_nr(left, left_nritems - push_items);
	push_space -= leaf_data_end(root, left);

	/* make room in the right data area */
	data_end = leaf_data_end(root, right);
	memmove_extent_buffer(right,
			      btrfs_leaf_data(right) + data_end - push_space,
			      btrfs_leaf_data(right) + data_end,
			      BTRFS_LEAF_DATA_SIZE(root) - data_end);

	/* copy from the left data area */
	copy_extent_buffer(right, left, btrfs_leaf_data(right) +
			   BTRFS_LEAF_DATA_SIZE(root) - push_space,
			   btrfs_leaf_data(left) + leaf_data_end(root, left),

	memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
			      btrfs_item_nr_offset(0),
			      right_nritems * sizeof(struct btrfs_item));

	/* copy the items from left to right */
	copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(left_nritems - push_items),
			   push_items * sizeof(struct btrfs_item));

	/* update the item pointers */
	right_nritems += push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(i);
		push_space -= btrfs_token_item_size(right, item, &token);
		btrfs_set_token_item_offset(right, item, push_space, &token);

	left_nritems -= push_items;
	btrfs_set_header_nritems(left, left_nritems);
		btrfs_mark_buffer_dirty(left);
		clean_tree_block(trans, root->fs_info, left);

	btrfs_mark_buffer_dirty(right);

	btrfs_item_key(right, &disk_key, 0);
	btrfs_set_node_key(upper, &disk_key, slot + 1);
	btrfs_mark_buffer_dirty(upper);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] >= left_nritems) {
		path->slots[0] -= left_nritems;
		if (btrfs_header_nritems(path->nodes[0]) == 0)
			clean_tree_block(trans, root->fs_info, path->nodes[0]);
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[1] += 1;
		btrfs_tree_unlock(right);
		free_extent_buffer(right);

	btrfs_tree_unlock(right);
	free_extent_buffer(right);
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf. It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;

	if (!path->nodes[1])

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)

	if (path->slots[0] == left_nritems && !empty) {
		/* Key greater than all keys in the leaf, right neighbor has
		 * enough room for it and we're not emptying our leaf to delete
		 * it, therefore use right neighbor to insert the new item and
		 * no need to touch/dirty our left leaf. */
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->nodes[0] = right;

	return __push_leaf_right(trans, root, path, min_data_size, empty,
				 right, free_space, left_nritems, min_slot);

	btrfs_tree_unlock(right);
	free_extent_buffer(right);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 old_left_item_size;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

		nr = min(right_nritems, max_slot);
		nr = min(right_nritems - 1, max_slot);

	for (i = 0; i < nr; i++) {
		item = btrfs_item_nr(i);

		if (!empty && push_items > 0) {
			if (path->slots[0] < i)
			if (path->slots[0] == i) {
				int space = btrfs_leaf_free_space(root, right);
				if (space + push_space * 2 > free_space)

		if (path->slots[0] == i)
			push_space += data_size;

		this_item_size = btrfs_item_size(right, item);
		if (this_item_size + sizeof(*item) + push_space > free_space)

		push_space += this_item_size + sizeof(*item);

	if (push_items == 0) {

	WARN_ON(!empty && push_items == btrfs_header_nritems(right));

	/* push data from right to left */
	copy_extent_buffer(left, right,
			   btrfs_item_nr_offset(btrfs_header_nritems(left)),
			   btrfs_item_nr_offset(0),
			   push_items * sizeof(struct btrfs_item));

	push_space = BTRFS_LEAF_DATA_SIZE(root) -
		     btrfs_item_offset_nr(right, push_items - 1);

	copy_extent_buffer(left, right, btrfs_leaf_data(left) +
			   leaf_data_end(root, left) - push_space,
			   btrfs_leaf_data(right) +
			   btrfs_item_offset_nr(right, push_items - 1),
	old_left_nritems = btrfs_header_nritems(left);
	BUG_ON(old_left_nritems <= 0);

	old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
	for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
		item = btrfs_item_nr(i);

		ioff = btrfs_token_item_offset(left, item, &token);
		btrfs_set_token_item_offset(left, item,
			ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
	btrfs_set_header_nritems(left, old_left_nritems + push_items);

	/* fixup right node */
	if (push_items > right_nritems)
		WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,

	if (push_items < right_nritems) {
		push_space = btrfs_item_offset_nr(right, push_items - 1) -
			     leaf_data_end(root, right);
		memmove_extent_buffer(right, btrfs_leaf_data(right) +
				      BTRFS_LEAF_DATA_SIZE(root) - push_space,
				      btrfs_leaf_data(right) +
				      leaf_data_end(root, right), push_space);

	memmove_extent_buffer(right, btrfs_item_nr_offset(0),
			      btrfs_item_nr_offset(push_items),
			      (btrfs_header_nritems(right) - push_items) *
			      sizeof(struct btrfs_item));

	right_nritems -= push_items;
	btrfs_set_header_nritems(right, right_nritems);
	push_space = BTRFS_LEAF_DATA_SIZE(root);
	for (i = 0; i < right_nritems; i++) {
		item = btrfs_item_nr(i);

		push_space = push_space - btrfs_token_item_size(right,
		btrfs_set_token_item_offset(right, item, push_space, &token);

	btrfs_mark_buffer_dirty(left);
		btrfs_mark_buffer_dirty(right);
		clean_tree_block(trans, root->fs_info, right);

	btrfs_item_key(right, &disk_key, 0);
	fixup_low_keys(root->fs_info, path, &disk_key, 1);

	/* then fixup the leaf pointer in the path */
	if (path->slots[0] < push_items) {
		path->slots[0] += old_left_nritems;
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = left;
		path->slots[1] -= 1;
		btrfs_tree_unlock(left);
		free_extent_buffer(left);
		path->slots[0] -= push_items;

	BUG_ON(path->slots[0] < 0);

	btrfs_tree_unlock(left);
	free_extent_buffer(left);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes. returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items. The
 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;

	slot = path->slots[1];
	if (!path->nodes[1])

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);

	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
		/* we hit -ENOSPC, but it isn't fatal here */

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {

	return __push_leaf_left(trans, root, path, min_data_size,
				empty, left, free_space, right_nritems,

	btrfs_tree_unlock(left);
	free_extent_buffer(left);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 */
static noinline void copy_for_split(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *l,
				    struct extent_buffer *right,
				    int slot, int mid, int nritems)
	struct btrfs_disk_key disk_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
			   btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
			   data_copy_size, btrfs_leaf_data(l) +
			   leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(i);

		ioff = btrfs_token_item_offset(right, item, &token);
		btrfs_set_token_item_offset(right, item,
					    ioff + rt_data_off, &token);

	btrfs_set_header_nritems(l, mid);
	btrfs_item_key(right, &disk_key, 0);
	insert_ptr(trans, root, path, &disk_key, right->start,
		   path->slots[1] + 1, 1);

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
		btrfs_tree_unlock(right);
		free_extent_buffer(right);

	BUG_ON(path->slots[0] < 0);
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf. A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves. If all goes well we can avoid the double split
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
	int space_needed = data_size;

	slot = path->slots[0];
	if (slot < btrfs_header_nritems(path->nodes[0]))
		space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);

	/*
	 * try to push all the items after our slot into the
	 */
	ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * our goal is to get our slot at the start or end of a leaf. If
	 * we've done so we're done
	 */
	if (path->slots[0] == 0 || path->slots[0] == nritems)

	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)

	/* try to push all the items before our slot into the next leaf */
	slot = path->slots[0];
	ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	struct extent_buffer *right;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	slot = path->slots[0];
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))

	/* first try to make some room by pushing left and right */
	if (data_size && path->nodes[1]) {
		int space_needed = data_size;

		if (slot < btrfs_header_nritems(l))
			space_needed -= btrfs_leaf_free_space(root, l);

		wret = push_leaf_right(trans, root, path, space_needed,
				       space_needed, 0, 0);
		wret = push_leaf_left(trans, root, path, space_needed,
				      space_needed, 0, (u32)-1);

		/* did the pushes work? */
		if (btrfs_leaf_free_space(root, l) >= data_size)

	if (!path->nodes[1]) {
		ret = insert_new_root(trans, root, path, 1);

	slot = path->slots[0];
	nritems = btrfs_header_nritems(l);
	mid = (nritems + 1) / 2;

	    leaf_space_used(l, mid, nritems - mid) + data_size >
	    BTRFS_LEAF_DATA_SIZE(root)) {
		if (slot >= nritems) {
			if (mid != nritems &&
			    leaf_space_used(l, mid, nritems - mid) +
			    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
				if (data_size && !tried_avoid_double)
					goto push_for_double;
		if (leaf_space_used(l, 0, mid) + data_size >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			if (!extend && data_size && slot == 0) {
			} else if ((extend || !data_size) && slot == 0) {
				if (mid != nritems &&
				    leaf_space_used(l, mid, nritems - mid) +
				    data_size > BTRFS_LEAF_DATA_SIZE(root)) {
					if (data_size && !tried_avoid_double)
						goto push_for_double;

		btrfs_cpu_key_to_disk(&disk_key, ins_key);
		btrfs_item_key(l, &disk_key, mid);

	right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
				       &disk_key, 0, l->start, 0);
		return PTR_ERR(right);

	root_add_used(root, root->nodesize);

	memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
	btrfs_set_header_bytenr(right, right->start);
	btrfs_set_header_generation(right, trans->transid);
	btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
	btrfs_set_header_owner(right, root->root_key.objectid);
	btrfs_set_header_level(right, 0);
	write_extent_buffer(right, fs_info->fsid,
			    btrfs_header_fsid(), BTRFS_FSID_SIZE);

	write_extent_buffer(right, fs_info->chunk_tree_uuid,
			    btrfs_header_chunk_tree_uuid(right),

			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
				   path->slots[1] + 1, 1);
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			path->slots[1] += 1;
			btrfs_set_header_nritems(right, 0);
			insert_ptr(trans, root, path, &disk_key, right->start,
			btrfs_tree_unlock(path->nodes[0]);
			free_extent_buffer(path->nodes[0]);
			path->nodes[0] = right;
			if (path->slots[1] == 0)
				fixup_low_keys(fs_info, path, &disk_key, 1);
		btrfs_mark_buffer_dirty(right);

	copy_for_split(trans, root, path, l, right, slot, mid, nritems);

		BUG_ON(num_doubles != 0);

	push_for_double_split(trans, root, path, data_size);
	tried_avoid_double = 1;
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct btrfs_path *path, int ins_len)
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
	       key.type != BTRFS_EXTENT_CSUM_KEY);

	if (btrfs_leaf_free_space(root, leaf) >= ins_len)

	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->search_for_split = 1;
	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	path->search_for_split = 0;

	leaf = path->nodes[0];
	/* if our item isn't there, return now */
	if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))

	/* the leaf has changed, it now has room. return now */
	if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)

	if (key.type == BTRFS_EXTENT_DATA_KEY) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))

	btrfs_set_path_blocking(path);
	ret = split_leaf(trans, root, &key, path, ins_len, 1);

	path->keep_locks = 0;
	btrfs_unlock_up_safe(path, 1);

	path->keep_locks = 0;
static noinline int split_item(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_path *path,
			       struct btrfs_key *new_key,
			       unsigned long split_offset)
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	struct btrfs_item *new_item;
	struct btrfs_disk_key disk_key;

	leaf = path->nodes[0];
	BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));

	btrfs_set_path_blocking(path);

	item = btrfs_item_nr(path->slots[0]);
	orig_offset = btrfs_item_offset(leaf, item);
	item_size = btrfs_item_size(leaf, item);

	buf = kmalloc(item_size, GFP_NOFS);

	read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
			   path->slots[0]), item_size);

	slot = path->slots[0] + 1;
	nritems = btrfs_header_nritems(leaf);
	if (slot != nritems) {
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
				      btrfs_item_nr_offset(slot),
				      (nritems - slot) * sizeof(struct btrfs_item));

	btrfs_cpu_key_to_disk(&disk_key, new_key);
	btrfs_set_item_key(leaf, &disk_key, slot);

	new_item = btrfs_item_nr(slot);

	btrfs_set_item_offset(leaf, new_item, orig_offset);
	btrfs_set_item_size(leaf, new_item, item_size - split_offset);

	btrfs_set_item_offset(leaf, item,
			      orig_offset + item_size - split_offset);
	btrfs_set_item_size(leaf, item, split_offset);

	btrfs_set_header_nritems(leaf, nritems + 1);

	/* write the data for the start of the original item */
	write_extent_buffer(leaf, buf,
			    btrfs_item_ptr_offset(leaf, path->slots[0]),

	/* write the data for the new item */
	write_extent_buffer(leaf, buf + split_offset,
			    btrfs_item_ptr_offset(leaf, slot),
			    item_size - split_offset);
	btrfs_mark_buffer_dirty(leaf);

	BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation. After
 * the split, the path is pointing to the old item. The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));

	ret = split_item(trans, root, path, new_key, split_offset);
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
	struct extent_buffer *leaf;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));

	setup_items_for_insert(root, path, new_key, &item_size,
			       item_size, item_size +
			       sizeof(struct btrfs_item), 1);
	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
/*
 * make the item pointed to by the path smaller. new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 */
void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
			 u32 new_size, int from_end)
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	unsigned int data_end;
	unsigned int old_data_start;
	unsigned int old_size;
	unsigned int size_diff;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	old_size = btrfs_item_size_nr(leaf, slot);
	if (old_size == new_size)

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	old_data_start = btrfs_item_offset_nr(leaf, slot);

	size_diff = old_size - new_size;

	BUG_ON(slot >= nritems);

	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		item = btrfs_item_nr(i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff + size_diff, &token);

	/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			data_end + size_diff, btrfs_leaf_data(leaf) +
			data_end, old_data_start + new_size - data_end);
		struct btrfs_disk_key disk_key;

		btrfs_item_key(leaf, &disk_key, slot);

		if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
			struct btrfs_file_extent_item *fi;

			fi = btrfs_item_ptr(leaf, slot,
					    struct btrfs_file_extent_item);
			fi = (struct btrfs_file_extent_item *)(
			     (unsigned long)fi - size_diff);

			if (btrfs_file_extent_type(leaf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE) {
				ptr = btrfs_item_ptr_offset(leaf, slot);
				memmove_extent_buffer(leaf, ptr,
					BTRFS_FILE_EXTENT_INLINE_DATA_START);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			data_end + size_diff, btrfs_leaf_data(leaf) +
			data_end, old_data_start - data_end);

		offset = btrfs_disk_key_offset(&disk_key);
		btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
		btrfs_set_item_key(leaf, &disk_key, slot);
			fixup_low_keys(root->fs_info, path, &disk_key, 1);

	item = btrfs_item_nr(slot);
	btrfs_set_item_size(leaf, item, new_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
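/*
 * Illustrative sketch (not in the original source): shrinking an item's
 * payload with btrfs_truncate_item() above.  from_end = 1 chops bytes off
 * the tail of the item; the path must already point at it and 'new_size'
 * must not be larger than the current item size.
 */
static void __maybe_unused example_trim_item_tail(struct btrfs_root *root,
						  struct btrfs_path *path,
						  u32 new_size)
{
	u32 old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);

	if (new_size < old_size)
		btrfs_truncate_item(root, path, new_size, 1);
}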
/*
 * make the item pointed to by the path bigger, data_size is the added size.
 */
void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size)
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		btrfs_crit(root->fs_info, "slot %d too large, nritems %d",

	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		item = btrfs_item_nr(i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff - data_size, &token);

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - data_size, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
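/*
 * Illustrative sketch (not part of the original file): growing an item with
 * btrfs_extend_item() above and then filling the newly added tail bytes.
 * The 'extra' buffer and its length are hypothetical.  The item must already
 * exist and the leaf must have enough free space, which callers normally
 * guarantee via btrfs_search_slot() with a positive ins_len.
 */
static void __maybe_unused example_append_to_item(struct btrfs_root *root,
						  struct btrfs_path *path,
						  const void *extra, u32 len)
{
	struct extent_buffer *leaf;
	unsigned long ptr;
	u32 old_size;

	old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	btrfs_extend_item(root, path, len);

	leaf = path->nodes[0];
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	/* the new bytes sit at the end of the (now larger) item payload */
	write_extent_buffer(leaf, extra, ptr + old_size, len);
	btrfs_mark_buffer_dirty(leaf);
}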
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
	struct btrfs_item *item;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	struct btrfs_map_token token;

	if (path->slots[0] == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(root->fs_info, path, &disk_key, 1);
	btrfs_unlock_up_safe(path, 1);

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
			   total_size, btrfs_leaf_free_space(root, leaf));

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
				   slot, old_data, data_end);

		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);

		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
				      btrfs_item_nr_offset(slot),
				      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
				      data_end - total_data, btrfs_leaf_data(leaf) +
				      data_end, old_data - data_end);
		data_end = old_data;

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);

	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(root, path, cpu_key, data_size,
			       total_data, total_size, nr);
	return 0;
}
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}
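
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * a minimal caller of btrfs_insert_item().  The key values and payload are
 * hypothetical; a real caller uses a key type defined for its tree.
 */
static int __maybe_unused example_insert_blob(struct btrfs_trans_handle *trans,
					      struct btrfs_root *root,
					      u64 objectid, u8 type, u64 offset,
					      void *blob, u32 blob_len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	/* allocates and frees its own path, then copies blob into the item */
	return btrfs_insert_item(trans, root, &key, blob, blob_len);
}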
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
		    int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;
	int ret;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		if (level)
			tree_mod_log_eb_move(root->fs_info, parent, slot,
					     slot + 1, nritems - slot - 1);
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	} else if (level) {
		ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
					      MOD_LOG_KEY_REMOVE, GFP_NOFS);
		BUG_ON(ret < 0);
	}

	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 last_off;
	u32 dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root->fs_info, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(root->fs_info, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
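
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * delete a single item by key.  This mirrors what callers typically do:
 * search with ins_len == -1 and cow == 1, then hand the slot to
 * btrfs_del_items() (btrfs_del_item() is the one-item wrapper for this).
 */
static int __maybe_unused example_delete_one(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret == 0)
		ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
	else if (ret > 0)
		ret = -ENOENT;

	btrfs_free_path(path);
	return ret;
}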
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0) {
		key.offset--;
	} else if (key.type > 0) {
		key.type--;
		key.offset = (u64)-1;
	} else if (key.objectid > 0) {
		key.objectid--;
		key.type = (u8)-1;
		key.offset = (u64)-1;
	} else {
		return 1;
	}

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	/*
	 * We might have had an item with the previous key in the tree right
	 * before we released our path. And after we released our path, that
	 * item might have been pushed to the first slot (0) of the leaf we
	 * were holding due to a tree balance. Alternatively, an item with the
	 * previous key can exist as the only element of a leaf (big fat item).
	 * Therefore account for these 2 cases, so that our callers (like
	 * btrfs_previous_item) don't miss an existing item with a key matching
	 * the previous key we computed above.
	 */
	if (ret <= 0)
		return 0;
	return 1;
}
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that have a minimum transaction id.
 * This is used by the btree defrag code, and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_path *path,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;
	int keep_locks = path->keep_locks;

	path->keep_locks = 1;
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the min_trans parameters.
		 * If it is too old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 gen;

			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			break;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	path->keep_locks = keep_locks;
	if (ret == 0) {
		btrfs_unlock_up_safe(path, path->lowest_level + 1);
		btrfs_set_path_blocking(path);
		memcpy(min_key, &found_key, sizeof(found_key));
	}
	return ret;
}
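
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * visit every key that lives in a block newer than @min_trans, roughly the
 * way the defrag and tree-log code drive btrfs_search_forward().
 */
static int __maybe_unused example_walk_newer_than(struct btrfs_root *root,
						  u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, path, min_trans);
		if (ret)	/* 1 == nothing left, < 0 == error */
			break;

		/* min_key now holds the key that was found; process it here */

		btrfs_release_path(path);

		/* step past the key we just handled */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}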
static void tree_move_down(struct btrfs_root *root,
			   struct btrfs_path *path,
			   int *level, int root_level)
{
	BUG_ON(*level == 0);
	path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
					path->slots[*level]);
	path->slots[*level - 1] = 0;
	(*level)--;
}
static int tree_move_next_or_upnext(struct btrfs_root *root,
				    struct btrfs_path *path,
				    int *level, int root_level)
{
	int ret = 0;
	int nritems;
	nritems = btrfs_header_nritems(path->nodes[*level]);

	path->slots[*level]++;

	while (path->slots[*level] >= nritems) {
		if (*level == root_level)
			return -1;

		/* move upnext */
		path->slots[*level] = 0;
		free_extent_buffer(path->nodes[*level]);
		path->nodes[*level] = NULL;
		(*level)++;
		path->slots[*level]++;

		nritems = btrfs_header_nritems(path->nodes[*level]);
		ret = 1;
	}
	return ret;
}
/*
 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
 * or down.
 */
static int tree_advance(struct btrfs_root *root,
			struct btrfs_path *path,
			int *level, int root_level,
			int allow_down,
			struct btrfs_key *key)
{
	int ret;

	if (*level == 0 || !allow_down) {
		ret = tree_move_next_or_upnext(root, path, level, root_level);
	} else {
		tree_move_down(root, path, level, root_level);
		ret = 0;
	}
	if (ret >= 0) {
		if (*level == 0)
			btrfs_item_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
		else
			btrfs_node_key_to_cpu(path->nodes[*level], key,
					path->slots[*level]);
	}
	return ret;
}
static int tree_compare_item(struct btrfs_root *left_root,
			     struct btrfs_path *left_path,
			     struct btrfs_path *right_path,
			     char *tmp_buf)
{
	int cmp;
	int len1, len2;
	unsigned long off1, off2;

	len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
	len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
	if (len1 != len2)
		return 1;

	off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
	off2 = btrfs_item_ptr_offset(right_path->nodes[0],
					right_path->slots[0]);

	read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);

	cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
	if (cmp)
		return 1;
	return 0;
}
#define ADVANCE 1
#define ADVANCE_ONLY_NEXT -1

/*
 * This function compares two trees and calls the provided callback for
 * every changed/new/deleted item it finds.
 * If shared tree blocks are encountered, whole subtrees are skipped, making
 * the compare pretty fast on snapshotted subvolumes.
 *
 * This currently works on commit roots only. As commit roots are read only,
 * we don't do any locking. The commit roots are protected with transactions.
 * Transactions are ended and rejoined when a commit is tried in between.
 *
 * This function checks for modifications done to the trees while comparing.
 * If it detects a change, it aborts immediately.
 */
int btrfs_compare_trees(struct btrfs_root *left_root,
			struct btrfs_root *right_root,
			btrfs_changed_cb_t changed_cb, void *ctx)
{
	int ret;
	int cmp;
	struct btrfs_path *left_path = NULL;
	struct btrfs_path *right_path = NULL;
	struct btrfs_key left_key;
	struct btrfs_key right_key;
	char *tmp_buf = NULL;
	int left_root_level;
	int right_root_level;
	int left_level;
	int right_level;
	int left_end_reached;
	int right_end_reached;
	int advance_left;
	int advance_right;
	u64 left_blockptr;
	u64 right_blockptr;
	u64 left_gen;
	u64 right_gen;

	left_path = btrfs_alloc_path();
	if (!left_path) {
		ret = -ENOMEM;
		goto out;
	}
	right_path = btrfs_alloc_path();
	if (!right_path) {
		ret = -ENOMEM;
		goto out;
	}

	tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN);
	if (!tmp_buf) {
		tmp_buf = vmalloc(left_root->nodesize);
		if (!tmp_buf) {
			ret = -ENOMEM;
			goto out;
		}
	}

	left_path->search_commit_root = 1;
	left_path->skip_locking = 1;
	right_path->search_commit_root = 1;
	right_path->skip_locking = 1;
	/*
	 * Strategy: Go to the first items of both trees. Then do
	 *
	 * If both trees are at level 0
	 *   Compare keys of current items
	 *     If left < right treat left item as new, advance left tree
	 *       and repeat
	 *     If left > right treat right item as deleted, advance right tree
	 *       and repeat
	 *     If left == right do deep compare of items, treat as changed if
	 *       needed, advance both trees and repeat
	 * If both trees are at the same level but not at level 0
	 *   Compare keys of current nodes/leafs
	 *     If left < right advance left tree and repeat
	 *     If left > right advance right tree and repeat
	 *     If left == right compare blockptrs of the next nodes/leafs
	 *       If they match advance both trees but stay at the same level
	 *         and repeat
	 *       If they don't match advance both trees while allowing to go
	 *         deeper and repeat
	 * If tree levels are different
	 *   Advance the tree that needs it and repeat
	 *
	 * Advancing a tree means:
	 *   If we are at level 0, try to go to the next slot. If that's not
	 *   possible, go one level up and repeat. Stop when we found a level
	 *   where we could go to the next slot. We may at this point be on a
	 *   node or a leaf.
	 *
	 *   If we are not at level 0 and not on shared tree blocks, go one
	 *   level deeper.
	 *
	 *   If we are not at level 0 and on shared tree blocks, go one slot to
	 *   the right if possible or go up and right.
	 */
	down_read(&left_root->fs_info->commit_root_sem);
	left_level = btrfs_header_level(left_root->commit_root);
	left_root_level = left_level;
	left_path->nodes[left_level] = left_root->commit_root;
	extent_buffer_get(left_path->nodes[left_level]);

	right_level = btrfs_header_level(right_root->commit_root);
	right_root_level = right_level;
	right_path->nodes[right_level] = right_root->commit_root;
	extent_buffer_get(right_path->nodes[right_level]);
	up_read(&left_root->fs_info->commit_root_sem);

	if (left_level == 0)
		btrfs_item_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	else
		btrfs_node_key_to_cpu(left_path->nodes[left_level],
				&left_key, left_path->slots[left_level]);
	if (right_level == 0)
		btrfs_item_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);
	else
		btrfs_node_key_to_cpu(right_path->nodes[right_level],
				&right_key, right_path->slots[right_level]);

	left_end_reached = right_end_reached = 0;
	advance_left = advance_right = 0;

	while (1) {
		if (advance_left && !left_end_reached) {
			ret = tree_advance(left_root, left_path, &left_level,
					left_root_level,
					advance_left != ADVANCE_ONLY_NEXT,
					&left_key);
			if (ret < 0)
				left_end_reached = ADVANCE;
			advance_left = 0;
		}
		if (advance_right && !right_end_reached) {
			ret = tree_advance(right_root, right_path, &right_level,
					right_root_level,
					advance_right != ADVANCE_ONLY_NEXT,
					&right_key);
			if (ret < 0)
				right_end_reached = ADVANCE;
			advance_right = 0;
		}

		if (left_end_reached && right_end_reached) {
			ret = 0;
			goto out;
		} else if (left_end_reached) {
			if (right_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_right = ADVANCE;
			continue;
		} else if (right_end_reached) {
			if (left_level == 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
			}
			advance_left = ADVANCE;
			continue;
		}

		if (left_level == 0 && right_level == 0) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&left_key,
						BTRFS_COMPARE_TREE_NEW,
						ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				ret = changed_cb(left_root, right_root,
						left_path, right_path,
						&right_key,
						BTRFS_COMPARE_TREE_DELETED,
						ctx);
				if (ret < 0)
					goto out;
				advance_right = ADVANCE;
			} else {
				enum btrfs_compare_tree_result result;

				WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
				ret = tree_compare_item(left_root, left_path,
							right_path, tmp_buf);
				if (ret)
					result = BTRFS_COMPARE_TREE_CHANGED;
				else
					result = BTRFS_COMPARE_TREE_SAME;
				ret = changed_cb(left_root, right_root,
						 left_path, right_path,
						 &left_key, result, ctx);
				if (ret < 0)
					goto out;
				advance_left = ADVANCE;
				advance_right = ADVANCE;
			}
		} else if (left_level == right_level) {
			cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
			if (cmp < 0) {
				advance_left = ADVANCE;
			} else if (cmp > 0) {
				advance_right = ADVANCE;
			} else {
				left_blockptr = btrfs_node_blockptr(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_blockptr = btrfs_node_blockptr(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				left_gen = btrfs_node_ptr_generation(
						left_path->nodes[left_level],
						left_path->slots[left_level]);
				right_gen = btrfs_node_ptr_generation(
						right_path->nodes[right_level],
						right_path->slots[right_level]);
				if (left_blockptr == right_blockptr &&
				    left_gen == right_gen) {
					/*
					 * As we're on a shared block, don't
					 * allow to go deeper.
					 */
					advance_left = ADVANCE_ONLY_NEXT;
					advance_right = ADVANCE_ONLY_NEXT;
				} else {
					advance_left = ADVANCE;
					advance_right = ADVANCE;
				}
			}
		} else if (left_level < right_level) {
			advance_right = ADVANCE;
		} else {
			advance_left = ADVANCE;
		}
	}

out:
	btrfs_free_path(left_path);
	btrfs_free_path(right_path);
	kvfree(tmp_buf);
	return ret;
}
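
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * the shape of a btrfs_changed_cb_t callback and how btrfs_compare_trees()
 * is driven with it; send is the in-tree user of this interface.
 */
static int __maybe_unused example_changed_cb(struct btrfs_root *left_root,
					     struct btrfs_root *right_root,
					     struct btrfs_path *left_path,
					     struct btrfs_path *right_path,
					     struct btrfs_key *key,
					     enum btrfs_compare_tree_result result,
					     void *ctx)
{
	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:		/* key only in left_root */
	case BTRFS_COMPARE_TREE_DELETED:	/* key only in right_root */
	case BTRFS_COMPARE_TREE_CHANGED:	/* same key, different item */
	case BTRFS_COMPARE_TREE_SAME:		/* identical items */
		break;
	}
	return 0;	/* a negative return aborts the compare */
}

static int __maybe_unused example_compare(struct btrfs_root *left_root,
					  struct btrfs_root *right_root)
{
	return btrfs_compare_trees(left_root, right_root,
				   example_changed_cb, NULL);
}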
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the min_trans parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	return btrfs_next_old_leaf(root, path, 0);
}
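
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * the common iteration pattern built on btrfs_search_slot() and
 * btrfs_next_leaf(), scanning every item of a root from @first_key onwards.
 */
static int __maybe_unused example_iterate(struct btrfs_root *root,
					  struct btrfs_key *first_key)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, first_key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)	/* no more leaves */
				break;
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* process the item at (leaf, path->slots[0]) here */

		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}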
int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
			u64 time_seq)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	if (time_seq)
		ret = btrfs_search_old_slot(root, &key, path, time_seq);
	else
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}
	/*
	 * So the above check misses one case:
	 * - after releasing the path above, someone has removed the item that
	 *   used to be at the very end of the block, and balance between leafs
	 *   gets another one with bigger key.offset to replace it.
	 *
	 * This one should be returned as well, or we can get leaf corruption
	 * later(esp. in __btrfs_drop_extents()).
	 *
	 * And a bit more explanation about this check,
	 * with ret > 0, the key isn't found, the path points to the slot
	 * where it should be inserted, so the path->slots[0] item must be the
	 * bigger one.
	 */
	if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret && time_seq) {
				/*
				 * If we don't get the lock, we may be racing
				 * with push_leaf_left, holding that lock while
				 * itself waiting for the leaf we've currently
				 * locked. To solve this situation, we give up
				 * on our lock and cycle.
				 */
				free_extent_buffer(next);
				btrfs_release_path(path);
				cond_resched();
				goto again;
			}
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key, 0);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
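
/*
 * Example (editor's illustrative sketch, not part of the original source):
 * after a search has positioned the path at or past the area of interest,
 * walk backwards to the closest preceding item of @type for @objectid.
 */
static int __maybe_unused example_find_prev(struct btrfs_root *root,
					    struct btrfs_path *path,
					    u64 objectid, int type)
{
	int ret;

	ret = btrfs_previous_item(root, path, objectid, type);
	if (ret == 0) {
		/* path->nodes[0], path->slots[0] now point at the match */
	}
	return ret;	/* 0 found, 1 nothing earlier, < 0 on error */
}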
/*
 * search in extent tree to find a previous Metadata/Data extent item with
 * min objectid.
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_extent_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
		    found_key.type == BTRFS_METADATA_ITEM_KEY)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < BTRFS_EXTENT_ITEM_KEY