/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include "transaction.h"
#include "print-tree.h"
static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_key *ins_key,
                      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct extent_buffer *dst,
                          struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct extent_buffer *dst_buf,
                              struct extent_buffer *src_buf);
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                   struct btrfs_path *path, int level, int slot);
struct btrfs_path *btrfs_alloc_path(void)
{
        struct btrfs_path *path;
        path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
        return path;
}
/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
        int i;

        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
                if (!p->nodes[i] || !p->locks[i])
                        continue;
                btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
                if (p->locks[i] == BTRFS_READ_LOCK)
                        p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
                else if (p->locks[i] == BTRFS_WRITE_LOCK)
                        p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
        }
}
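/*
 * Illustrative sketch (assumes a hypothetical lock_state array, not kernel
 * code): the helper above and its counterpart below only flip the
 * bookkeeping in p->locks[] between the spinning and blocking flavours of
 * the same lock:
 *
 *	enum lock_state { UNLOCKED, READ, READ_BLOCKING, WRITE, WRITE_BLOCKING };
 *
 *	static void set_blocking(enum lock_state *locks, int nr_levels)
 *	{
 *		int i;
 *		for (i = 0; i < nr_levels; i++) {
 *			if (locks[i] == READ)
 *				locks[i] = READ_BLOCKING;
 *			else if (locks[i] == WRITE)
 *				locks[i] = WRITE_BLOCKING;
 *		}
 *	}
 *
 * The real code must also tell each extent_buffer lock itself to switch
 * mode, which is what btrfs_set_lock_blocking_rw() does.
 */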
/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
                                        struct extent_buffer *held, int held_rw)
{
        int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /* lockdep really cares that we take all of these spinlocks
         * in the right order.  If any of the locks in the path are not
         * currently blocking, it is going to complain.  So, make really
         * really sure by forcing the path to blocking before we clear
         * the path blocking.
         */
        if (held) {
                btrfs_set_lock_blocking_rw(held, held_rw);
                if (held_rw == BTRFS_WRITE_LOCK)
                        held_rw = BTRFS_WRITE_LOCK_BLOCKING;
                else if (held_rw == BTRFS_READ_LOCK)
                        held_rw = BTRFS_READ_LOCK_BLOCKING;
        }
        btrfs_set_path_blocking(p);
#endif

        for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
                if (p->nodes[i] && p->locks[i]) {
                        btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
                        if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
                                p->locks[i] = BTRFS_WRITE_LOCK;
                        else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
                                p->locks[i] = BTRFS_READ_LOCK;
                }
        }

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (held)
                btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
        btrfs_release_path(p);
        kmem_cache_free(btrfs_path_cachep, p);
}
/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths that hold no locks or extent buffers.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
        int i;

        for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
                if (!p->nodes[i])
                        continue;
                if (p->locks[i])
                        btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
                free_extent_buffer(p->nodes[i]);
        }
}
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                rcu_read_lock();
                eb = rcu_dereference(root->node);

                /*
                 * RCU really hurts here, we could free up the root node because
                 * it was cow'ed but we may not get the new root node yet so do
                 * the inc_not_zero dance and if it doesn't work then
                 * synchronize_rcu and try again.
                 */
                if (atomic_inc_not_zero(&eb->refs)) {
                        rcu_read_unlock();
                        break;
                }
                rcu_read_unlock();
                synchronize_rcu();
        }
        return eb;
}
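/*
 * Usage sketch (illustrative only): a caller that just wants to peek at the
 * current root could do
 *
 *	struct extent_buffer *eb = btrfs_root_node(root);
 *	int level = btrfs_header_level(eb);
 *	free_extent_buffer(eb);
 *
 * but, per the comment above, nothing guarantees eb is still the root by the
 * time the level is read; callers that need that stability must loop and take
 * the tree lock, which is what btrfs_lock_root_node() below does.
 */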
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);
                btrfs_tree_lock(eb);
                if (eb == root->node)
                        break;
                btrfs_tree_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
        struct extent_buffer *eb;

        while (1) {
                eb = btrfs_root_node(root);
                btrfs_tree_read_lock(eb);
                if (eb == root->node)
                        break;
                btrfs_tree_read_unlock(eb);
                free_extent_buffer(eb);
        }
        return eb;
}
/* cowonly roots (everything not a reference counted cow subvolume) just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
        if (root->track_dirty && list_empty(&root->dirty_list)) {
                list_add(&root->dirty_list,
                         &root->fs_info->dirty_cowonly_roots);
        }
}
/*
 * used by snapshot creation to make a copy of a root for a tree with
 * a given objectid.  The buffer with the new root node is returned in
 * cow_ret, and this func returns zero on success or a negative error code.
 */
int btrfs_copy_root(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root,
                    struct extent_buffer *buf,
                    struct extent_buffer **cow_ret, u64 new_root_objectid)
{
        struct extent_buffer *cow;
        int ret = 0;
        int level;
        struct btrfs_disk_key disk_key;

        WARN_ON(root->ref_cows && trans->transid !=
                root->fs_info->running_transaction->transid);
        WARN_ON(root->ref_cows && trans->transid != root->last_trans);

        level = btrfs_header_level(buf);
        if (level == 0)
                btrfs_item_key(buf, &disk_key, 0);
        else
                btrfs_node_key(buf, &disk_key, 0);

        cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
                                     new_root_objectid, &disk_key, level,
                                     buf->start, 0, 1);
        if (IS_ERR(cow))
                return PTR_ERR(cow);

        copy_extent_buffer(cow, buf, 0, 0, cow->len);
        btrfs_set_header_bytenr(cow, cow->start);
        btrfs_set_header_generation(cow, trans->transid);
        btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
        btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
                                     BTRFS_HEADER_FLAG_RELOC);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
        else
                btrfs_set_header_owner(cow, new_root_objectid);

        write_extent_buffer(cow, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(cow),
                            BTRFS_FSID_SIZE);

        WARN_ON(btrfs_header_generation(buf) > trans->transid);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                ret = btrfs_inc_ref(trans, root, cow, 1, 1);
        else
                ret = btrfs_inc_ref(trans, root, cow, 0, 1);
        if (ret)
                return ret;

        btrfs_mark_buffer_dirty(cow);
        *cow_ret = cow;
        return 0;
}
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
                              struct extent_buffer *buf)
{
        /*
         * Tree blocks not in reference counted trees and tree roots
         * are never shared. If a block was allocated after the last
         * snapshot and the block was not allocated by tree relocation,
         * we know the block is not shared.
         */
        if (root->ref_cows &&
            buf != root->node && buf != root->commit_root &&
            (btrfs_header_generation(buf) <=
             btrfs_root_last_snapshot(&root->root_item) ||
             btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
                return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (root->ref_cows &&
            btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
                return 1;
#endif
        return 0;
}
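/*
 * Put differently (shorthand sketch of the test above, not real variables):
 *
 *	shareable = root->ref_cows && buf != root->node &&
 *		    buf != root->commit_root &&
 *		    (generation <= last_snapshot || block_has_RELOC_flag);
 *
 * i.e. only blocks of reference counted trees that either predate the last
 * snapshot or were produced by tree relocation can be shared with another
 * tree.
 */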
static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct extent_buffer *buf,
                                       struct extent_buffer *cow,
                                       int *last_ref)
{
        /*
         * Backrefs update rules:
         *
         * Always use full backrefs for extent pointers in tree block
         * allocated by tree relocation.
         *
         * If a shared tree block is no longer referenced by its owner
         * tree (btrfs_header_owner(buf) == root->root_key.objectid),
         * use full backrefs for extent pointers in tree block.
         *
         * If a tree block is being relocated
         * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
         * use full backrefs for extent pointers in tree block.
         * The reason for this is some operations (such as drop tree)
         * are only allowed for blocks that use full backrefs.
         */
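        /*
         * Shorthand sketch of when the flags are switched (the names
         * abbreviate the accessors used below, they are not real variables):
         * with owner = btrfs_header_owner(buf) and objectid =
         * root->root_key.objectid, full backrefs are forced when
         *
         *	(owner == objectid || objectid == BTRFS_TREE_RELOC_OBJECTID) &&
         *	!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)
         *
         * which mirrors the checks performed on the shared buffer below.
         */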
344 if (btrfs_block_can_be_shared(root
, buf
)) {
345 ret
= btrfs_lookup_extent_info(trans
, root
, buf
->start
,
346 buf
->len
, &refs
, &flags
);
351 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
352 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
353 flags
= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
358 owner
= btrfs_header_owner(buf
);
359 BUG_ON(owner
== BTRFS_TREE_RELOC_OBJECTID
&&
360 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
));
363 if ((owner
== root
->root_key
.objectid
||
364 root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) &&
365 !(flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
)) {
366 ret
= btrfs_inc_ref(trans
, root
, buf
, 1, 1);
369 if (root
->root_key
.objectid
==
370 BTRFS_TREE_RELOC_OBJECTID
) {
371 ret
= btrfs_dec_ref(trans
, root
, buf
, 0, 1);
373 ret
= btrfs_inc_ref(trans
, root
, cow
, 1, 1);
376 new_flags
|= BTRFS_BLOCK_FLAG_FULL_BACKREF
;
379 if (root
->root_key
.objectid
==
380 BTRFS_TREE_RELOC_OBJECTID
)
381 ret
= btrfs_inc_ref(trans
, root
, cow
, 1, 1);
383 ret
= btrfs_inc_ref(trans
, root
, cow
, 0, 1);
386 if (new_flags
!= 0) {
387 ret
= btrfs_set_disk_extent_flags(trans
, root
,
394 if (flags
& BTRFS_BLOCK_FLAG_FULL_BACKREF
) {
395 if (root
->root_key
.objectid
==
396 BTRFS_TREE_RELOC_OBJECTID
)
397 ret
= btrfs_inc_ref(trans
, root
, cow
, 1, 1);
399 ret
= btrfs_inc_ref(trans
, root
, cow
, 0, 1);
401 ret
= btrfs_dec_ref(trans
, root
, buf
, 1, 1);
404 clean_tree_block(trans
, root
, buf
);
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
422 static noinline
int __btrfs_cow_block(struct btrfs_trans_handle
*trans
,
423 struct btrfs_root
*root
,
424 struct extent_buffer
*buf
,
425 struct extent_buffer
*parent
, int parent_slot
,
426 struct extent_buffer
**cow_ret
,
427 u64 search_start
, u64 empty_size
)
429 struct btrfs_disk_key disk_key
;
430 struct extent_buffer
*cow
;
439 btrfs_assert_tree_locked(buf
);
441 WARN_ON(root
->ref_cows
&& trans
->transid
!=
442 root
->fs_info
->running_transaction
->transid
);
443 WARN_ON(root
->ref_cows
&& trans
->transid
!= root
->last_trans
);
445 level
= btrfs_header_level(buf
);
448 btrfs_item_key(buf
, &disk_key
, 0);
450 btrfs_node_key(buf
, &disk_key
, 0);
452 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
) {
454 parent_start
= parent
->start
;
460 cow
= btrfs_alloc_free_block(trans
, root
, buf
->len
, parent_start
,
461 root
->root_key
.objectid
, &disk_key
,
462 level
, search_start
, empty_size
, 1);
466 /* cow is set to blocking by btrfs_init_new_buffer */
468 copy_extent_buffer(cow
, buf
, 0, 0, cow
->len
);
469 btrfs_set_header_bytenr(cow
, cow
->start
);
470 btrfs_set_header_generation(cow
, trans
->transid
);
471 btrfs_set_header_backref_rev(cow
, BTRFS_MIXED_BACKREF_REV
);
472 btrfs_clear_header_flag(cow
, BTRFS_HEADER_FLAG_WRITTEN
|
473 BTRFS_HEADER_FLAG_RELOC
);
474 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
)
475 btrfs_set_header_flag(cow
, BTRFS_HEADER_FLAG_RELOC
);
477 btrfs_set_header_owner(cow
, root
->root_key
.objectid
);
479 write_extent_buffer(cow
, root
->fs_info
->fsid
,
480 (unsigned long)btrfs_header_fsid(cow
),
483 update_ref_for_cow(trans
, root
, buf
, cow
, &last_ref
);
486 btrfs_reloc_cow_block(trans
, root
, buf
, cow
);
488 if (buf
== root
->node
) {
489 WARN_ON(parent
&& parent
!= buf
);
490 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
||
491 btrfs_header_backref_rev(buf
) < BTRFS_MIXED_BACKREF_REV
)
492 parent_start
= buf
->start
;
496 extent_buffer_get(cow
);
497 rcu_assign_pointer(root
->node
, cow
);
499 btrfs_free_tree_block(trans
, root
, buf
, parent_start
,
501 free_extent_buffer(buf
);
502 add_root_to_dirty_list(root
);
504 if (root
->root_key
.objectid
== BTRFS_TREE_RELOC_OBJECTID
)
505 parent_start
= parent
->start
;
509 WARN_ON(trans
->transid
!= btrfs_header_generation(parent
));
510 btrfs_set_node_blockptr(parent
, parent_slot
,
512 btrfs_set_node_ptr_generation(parent
, parent_slot
,
514 btrfs_mark_buffer_dirty(parent
);
515 btrfs_free_tree_block(trans
, root
, buf
, parent_start
,
519 btrfs_tree_unlock(buf
);
520 free_extent_buffer_stale(buf
);
521 btrfs_mark_buffer_dirty(cow
);
static inline int should_cow_block(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct extent_buffer *buf)
{
        /* ensure we can see the force_cow */
        smp_rmb();

        /*
         * We do not need to cow a block if
         * 1) this block is not created or changed in this transaction;
         * 2) this block does not belong to TREE_RELOC tree;
         * 3) the root is not forced COW.
         *
         * What is forced COW:
         *    when we create snapshot during committing the transaction,
         *    after we've finished copying src root, we must COW the shared
         *    block to ensure the metadata consistency.
         */
        if (btrfs_header_generation(buf) == trans->transid &&
            !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
            !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
              btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
            !root->force_cow)
                return 0;
        return 1;
}
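/*
 * Equivalently (sketch): COW is skipped only when the block was already
 * created or changed in this transaction, has not been written out, is not a
 * relocation block living outside the TREE_RELOC tree, and the root is not in
 * forced-COW mode.  Any other combination returns 1 and makes
 * btrfs_cow_block() below call __btrfs_cow_block() for real.
 */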
/*
 * cows a single block, see __btrfs_cow_block for the real work.
 * This version of it has extra checks so that a block isn't cow'd more than
 * once per transaction, as long as it hasn't been written yet
 */
noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root, struct extent_buffer *buf,
                    struct extent_buffer *parent, int parent_slot,
                    struct extent_buffer **cow_ret)
{
        u64 search_start;
        int ret;

        if (trans->transaction != root->fs_info->running_transaction) {
                printk(KERN_CRIT "trans %llu running %llu\n",
                       (unsigned long long)trans->transid,
                       (unsigned long long)
                       root->fs_info->running_transaction->transid);
                WARN_ON(1);
        }
        if (trans->transid != root->fs_info->generation) {
                printk(KERN_CRIT "trans %llu running %llu\n",
                       (unsigned long long)trans->transid,
                       (unsigned long long)root->fs_info->generation);
                WARN_ON(1);
        }

        if (!should_cow_block(trans, root, buf)) {
                *cow_ret = buf;
                return 0;
        }

        search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);

        if (parent)
                btrfs_set_lock_blocking(parent);
        btrfs_set_lock_blocking(buf);

        ret = __btrfs_cow_block(trans, root, buf, parent,
                                parent_slot, cow_ret, search_start, 0);

        trace_btrfs_cow_block(root, buf, *cow_ret);

        return ret;
}
/*
 * helper function for defrag to decide if two blocks pointed to by a
 * node are actually close by
 */
static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
{
        if (blocknr < other && other - (blocknr + blocksize) < 32768)
                return 1;
        if (blocknr > other && blocknr - (other + blocksize) < 32768)
                return 1;
        return 0;
}
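/*
 * Example: with a 4096 byte blocksize, blocks at byte offsets 1000000 and
 * 1020000 are "close" because 1020000 - (1000000 + 4096) = 15904 is below the
 * 32768 byte threshold, so defrag will not bother reallocating one of them to
 * bring it nearer to the other.
 */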
/*
 * compare two keys in a memcmp fashion
 */
static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
{
        struct btrfs_key k1;

        btrfs_disk_key_to_cpu(&k1, disk);

        return btrfs_comp_cpu_keys(&k1, k2);
}

/*
 * same as comp_keys only with two btrfs_key's
 */
int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
{
        if (k1->objectid > k2->objectid)
                return 1;
        if (k1->objectid < k2->objectid)
                return -1;
        if (k1->type > k2->type)
                return 1;
        if (k1->type < k2->type)
                return -1;
        if (k1->offset > k2->offset)
                return 1;
        if (k1->offset < k2->offset)
                return -1;
        return 0;
}
645 * this is used by the defrag code to go through all the
646 * leaves pointed to by a node and reallocate them so that
647 * disk order is close to key order
649 int btrfs_realloc_node(struct btrfs_trans_handle
*trans
,
650 struct btrfs_root
*root
, struct extent_buffer
*parent
,
651 int start_slot
, int cache_only
, u64
*last_ret
,
652 struct btrfs_key
*progress
)
654 struct extent_buffer
*cur
;
657 u64 search_start
= *last_ret
;
667 int progress_passed
= 0;
668 struct btrfs_disk_key disk_key
;
670 parent_level
= btrfs_header_level(parent
);
671 if (cache_only
&& parent_level
!= 1)
674 if (trans
->transaction
!= root
->fs_info
->running_transaction
)
676 if (trans
->transid
!= root
->fs_info
->generation
)
679 parent_nritems
= btrfs_header_nritems(parent
);
680 blocksize
= btrfs_level_size(root
, parent_level
- 1);
681 end_slot
= parent_nritems
;
683 if (parent_nritems
== 1)
686 btrfs_set_lock_blocking(parent
);
688 for (i
= start_slot
; i
< end_slot
; i
++) {
691 btrfs_node_key(parent
, &disk_key
, i
);
692 if (!progress_passed
&& comp_keys(&disk_key
, progress
) < 0)
696 blocknr
= btrfs_node_blockptr(parent
, i
);
697 gen
= btrfs_node_ptr_generation(parent
, i
);
699 last_block
= blocknr
;
702 other
= btrfs_node_blockptr(parent
, i
- 1);
703 close
= close_blocks(blocknr
, other
, blocksize
);
705 if (!close
&& i
< end_slot
- 2) {
706 other
= btrfs_node_blockptr(parent
, i
+ 1);
707 close
= close_blocks(blocknr
, other
, blocksize
);
710 last_block
= blocknr
;
714 cur
= btrfs_find_tree_block(root
, blocknr
, blocksize
);
716 uptodate
= btrfs_buffer_uptodate(cur
, gen
);
719 if (!cur
|| !uptodate
) {
721 free_extent_buffer(cur
);
725 cur
= read_tree_block(root
, blocknr
,
729 } else if (!uptodate
) {
730 btrfs_read_buffer(cur
, gen
);
733 if (search_start
== 0)
734 search_start
= last_block
;
736 btrfs_tree_lock(cur
);
737 btrfs_set_lock_blocking(cur
);
738 err
= __btrfs_cow_block(trans
, root
, cur
, parent
, i
,
741 (end_slot
- i
) * blocksize
));
743 btrfs_tree_unlock(cur
);
744 free_extent_buffer(cur
);
747 search_start
= cur
->start
;
748 last_block
= cur
->start
;
749 *last_ret
= search_start
;
750 btrfs_tree_unlock(cur
);
751 free_extent_buffer(cur
);
/*
 * The leaf data grows from end-to-front in the node.
 * this returns the address of the start of the last item,
 * which is the stop of the leaf data stack
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
                                         struct extent_buffer *leaf)
{
        u32 nr = btrfs_header_nritems(leaf);
        if (nr == 0)
                return BTRFS_LEAF_DATA_SIZE(root);
        return btrfs_item_offset_nr(leaf, nr - 1);
}
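/*
 * Example (illustrative numbers): with a 3995 byte data area and three items
 * whose data is 100, 200 and 50 bytes long, the data is packed from the end,
 * so item 0's data starts at offset 3895, item 1's at 3695 and item 2's at
 * 3645.  leaf_data_end() then reports 3645, the lowest offset still occupied
 * by item data.
 */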
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
static noinline int generic_bin_search(struct extent_buffer *eb,
                                       unsigned long p,
                                       int item_size, struct btrfs_key *key,
                                       int max, int *slot)
{
        int low = 0;
        int high = max;
        int mid;
        int ret;
        struct btrfs_disk_key *tmp = NULL;
        struct btrfs_disk_key unaligned;
        unsigned long offset;
        char *kaddr = NULL;
        unsigned long map_start = 0;
        unsigned long map_len = 0;
        int err;

        while (low < high) {
                mid = (low + high) / 2;
                offset = p + mid * item_size;

                if (!kaddr || offset < map_start ||
                    (offset + sizeof(struct btrfs_disk_key)) >
                    map_start + map_len) {

                        err = map_private_extent_buffer(eb, offset,
                                                sizeof(struct btrfs_disk_key),
                                                &kaddr, &map_start, &map_len);

                        if (!err) {
                                tmp = (struct btrfs_disk_key *)(kaddr + offset -
                                                        map_start);
                        } else {
                                read_extent_buffer(eb, &unaligned,
                                                   offset, sizeof(unaligned));
                                tmp = &unaligned;
                        }

                } else {
                        tmp = (struct btrfs_disk_key *)(kaddr + offset -
                                                        map_start);
                }
                ret = comp_keys(tmp, key);

                if (ret < 0)
                        low = mid + 1;
                else if (ret > 0)
                        high = mid;
                else {
                        *slot = mid;
                        return 0;
                }
        }
        *slot = low;
        return 1;
}
/*
 * simple bin_search frontend that does the right thing for
 * leaves vs nodes
 */
static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
                      int level, int *slot)
{
        if (level == 0)
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_leaf, items),
                                          sizeof(struct btrfs_item),
                                          key, btrfs_header_nritems(eb),
                                          slot);
        else
                return generic_bin_search(eb,
                                          offsetof(struct btrfs_node, ptrs),
                                          sizeof(struct btrfs_key_ptr),
                                          key, btrfs_header_nritems(eb),
                                          slot);
}

int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
                     int level, int *slot)
{
        return bin_search(eb, key, level, slot);
}
static void root_add_used(struct btrfs_root *root, u32 size)
{
        spin_lock(&root->accounting_lock);
        btrfs_set_root_used(&root->root_item,
                            btrfs_root_used(&root->root_item) + size);
        spin_unlock(&root->accounting_lock);
}

static void root_sub_used(struct btrfs_root *root, u32 size)
{
        spin_lock(&root->accounting_lock);
        btrfs_set_root_used(&root->root_item,
                            btrfs_root_used(&root->root_item) - size);
        spin_unlock(&root->accounting_lock);
}
/* given a node and slot number, this reads the blocks it points to.  The
 * extent buffer is returned with a reference taken (but unlocked).
 * NULL is returned on error.
 */
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
                                   struct extent_buffer *parent, int slot)
{
        int level = btrfs_header_level(parent);

        if (slot >= btrfs_header_nritems(parent))
                return NULL;

        return read_tree_block(root, btrfs_node_blockptr(parent, slot),
                               btrfs_level_size(root, level - 1),
                               btrfs_node_ptr_generation(parent, slot));
}
904 * node level balancing, used to make sure nodes are in proper order for
905 * item deletion. We balance from the top down, so we have to make sure
906 * that a deletion won't leave an node completely empty later on.
908 static noinline
int balance_level(struct btrfs_trans_handle
*trans
,
909 struct btrfs_root
*root
,
910 struct btrfs_path
*path
, int level
)
912 struct extent_buffer
*right
= NULL
;
913 struct extent_buffer
*mid
;
914 struct extent_buffer
*left
= NULL
;
915 struct extent_buffer
*parent
= NULL
;
919 int orig_slot
= path
->slots
[level
];
925 mid
= path
->nodes
[level
];
927 WARN_ON(path
->locks
[level
] != BTRFS_WRITE_LOCK
&&
928 path
->locks
[level
] != BTRFS_WRITE_LOCK_BLOCKING
);
929 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
931 orig_ptr
= btrfs_node_blockptr(mid
, orig_slot
);
933 if (level
< BTRFS_MAX_LEVEL
- 1) {
934 parent
= path
->nodes
[level
+ 1];
935 pslot
= path
->slots
[level
+ 1];
939 * deal with the case where there is only one pointer in the root
940 * by promoting the node below to a root
943 struct extent_buffer
*child
;
945 if (btrfs_header_nritems(mid
) != 1)
948 /* promote the child to a root */
949 child
= read_node_slot(root
, mid
, 0);
951 btrfs_tree_lock(child
);
952 btrfs_set_lock_blocking(child
);
953 ret
= btrfs_cow_block(trans
, root
, child
, mid
, 0, &child
);
955 btrfs_tree_unlock(child
);
956 free_extent_buffer(child
);
960 rcu_assign_pointer(root
->node
, child
);
962 add_root_to_dirty_list(root
);
963 btrfs_tree_unlock(child
);
965 path
->locks
[level
] = 0;
966 path
->nodes
[level
] = NULL
;
967 clean_tree_block(trans
, root
, mid
);
968 btrfs_tree_unlock(mid
);
969 /* once for the path */
970 free_extent_buffer(mid
);
972 root_sub_used(root
, mid
->len
);
973 btrfs_free_tree_block(trans
, root
, mid
, 0, 1, 0);
974 /* once for the root ptr */
975 free_extent_buffer_stale(mid
);
978 if (btrfs_header_nritems(mid
) >
979 BTRFS_NODEPTRS_PER_BLOCK(root
) / 4)
982 btrfs_header_nritems(mid
);
984 left
= read_node_slot(root
, parent
, pslot
- 1);
986 btrfs_tree_lock(left
);
987 btrfs_set_lock_blocking(left
);
988 wret
= btrfs_cow_block(trans
, root
, left
,
989 parent
, pslot
- 1, &left
);
995 right
= read_node_slot(root
, parent
, pslot
+ 1);
997 btrfs_tree_lock(right
);
998 btrfs_set_lock_blocking(right
);
999 wret
= btrfs_cow_block(trans
, root
, right
,
1000 parent
, pslot
+ 1, &right
);
1007 /* first, try to make some room in the middle buffer */
1009 orig_slot
+= btrfs_header_nritems(left
);
1010 wret
= push_node_left(trans
, root
, left
, mid
, 1);
1013 btrfs_header_nritems(mid
);
1017 * then try to empty the right most buffer into the middle
1020 wret
= push_node_left(trans
, root
, mid
, right
, 1);
1021 if (wret
< 0 && wret
!= -ENOSPC
)
1023 if (btrfs_header_nritems(right
) == 0) {
1024 clean_tree_block(trans
, root
, right
);
1025 btrfs_tree_unlock(right
);
1026 wret
= del_ptr(trans
, root
, path
, level
+ 1, pslot
+
1030 root_sub_used(root
, right
->len
);
1031 btrfs_free_tree_block(trans
, root
, right
, 0, 1, 0);
1032 free_extent_buffer_stale(right
);
1035 struct btrfs_disk_key right_key
;
1036 btrfs_node_key(right
, &right_key
, 0);
1037 btrfs_set_node_key(parent
, &right_key
, pslot
+ 1);
1038 btrfs_mark_buffer_dirty(parent
);
1041 if (btrfs_header_nritems(mid
) == 1) {
1043 * we're not allowed to leave a node with one item in the
1044 * tree during a delete. A deletion from lower in the tree
1045 * could try to delete the only pointer in this node.
1046 * So, pull some keys from the left.
1047 * There has to be a left pointer at this point because
1048 * otherwise we would have pulled some pointers from the
1052 wret
= balance_node_right(trans
, root
, mid
, left
);
1058 wret
= push_node_left(trans
, root
, left
, mid
, 1);
1064 if (btrfs_header_nritems(mid
) == 0) {
1065 clean_tree_block(trans
, root
, mid
);
1066 btrfs_tree_unlock(mid
);
1067 wret
= del_ptr(trans
, root
, path
, level
+ 1, pslot
);
1070 root_sub_used(root
, mid
->len
);
1071 btrfs_free_tree_block(trans
, root
, mid
, 0, 1, 0);
1072 free_extent_buffer_stale(mid
);
1075 /* update the parent key to reflect our changes */
1076 struct btrfs_disk_key mid_key
;
1077 btrfs_node_key(mid
, &mid_key
, 0);
1078 btrfs_set_node_key(parent
, &mid_key
, pslot
);
1079 btrfs_mark_buffer_dirty(parent
);
1082 /* update the path */
1084 if (btrfs_header_nritems(left
) > orig_slot
) {
1085 extent_buffer_get(left
);
1086 /* left was locked after cow */
1087 path
->nodes
[level
] = left
;
1088 path
->slots
[level
+ 1] -= 1;
1089 path
->slots
[level
] = orig_slot
;
1091 btrfs_tree_unlock(mid
);
1092 free_extent_buffer(mid
);
1095 orig_slot
-= btrfs_header_nritems(left
);
1096 path
->slots
[level
] = orig_slot
;
1099 /* double check we haven't messed things up */
1101 btrfs_node_blockptr(path
->nodes
[level
], path
->slots
[level
]))
1105 btrfs_tree_unlock(right
);
1106 free_extent_buffer(right
);
1109 if (path
->nodes
[level
] != left
)
1110 btrfs_tree_unlock(left
);
1111 free_extent_buffer(left
);
1116 /* Node balancing for insertion. Here we only split or push nodes around
1117 * when they are completely full. This is also done top down, so we
1118 * have to be pessimistic.
1120 static noinline
int push_nodes_for_insert(struct btrfs_trans_handle
*trans
,
1121 struct btrfs_root
*root
,
1122 struct btrfs_path
*path
, int level
)
1124 struct extent_buffer
*right
= NULL
;
1125 struct extent_buffer
*mid
;
1126 struct extent_buffer
*left
= NULL
;
1127 struct extent_buffer
*parent
= NULL
;
1131 int orig_slot
= path
->slots
[level
];
1136 mid
= path
->nodes
[level
];
1137 WARN_ON(btrfs_header_generation(mid
) != trans
->transid
);
1139 if (level
< BTRFS_MAX_LEVEL
- 1) {
1140 parent
= path
->nodes
[level
+ 1];
1141 pslot
= path
->slots
[level
+ 1];
1147 left
= read_node_slot(root
, parent
, pslot
- 1);
1149 /* first, try to make some room in the middle buffer */
1153 btrfs_tree_lock(left
);
1154 btrfs_set_lock_blocking(left
);
1156 left_nr
= btrfs_header_nritems(left
);
1157 if (left_nr
>= BTRFS_NODEPTRS_PER_BLOCK(root
) - 1) {
1160 ret
= btrfs_cow_block(trans
, root
, left
, parent
,
1165 wret
= push_node_left(trans
, root
,
1172 struct btrfs_disk_key disk_key
;
1173 orig_slot
+= left_nr
;
1174 btrfs_node_key(mid
, &disk_key
, 0);
1175 btrfs_set_node_key(parent
, &disk_key
, pslot
);
1176 btrfs_mark_buffer_dirty(parent
);
1177 if (btrfs_header_nritems(left
) > orig_slot
) {
1178 path
->nodes
[level
] = left
;
1179 path
->slots
[level
+ 1] -= 1;
1180 path
->slots
[level
] = orig_slot
;
1181 btrfs_tree_unlock(mid
);
1182 free_extent_buffer(mid
);
1185 btrfs_header_nritems(left
);
1186 path
->slots
[level
] = orig_slot
;
1187 btrfs_tree_unlock(left
);
1188 free_extent_buffer(left
);
1192 btrfs_tree_unlock(left
);
1193 free_extent_buffer(left
);
1195 right
= read_node_slot(root
, parent
, pslot
+ 1);
1198 * then try to empty the right most buffer into the middle
1203 btrfs_tree_lock(right
);
1204 btrfs_set_lock_blocking(right
);
1206 right_nr
= btrfs_header_nritems(right
);
1207 if (right_nr
>= BTRFS_NODEPTRS_PER_BLOCK(root
) - 1) {
1210 ret
= btrfs_cow_block(trans
, root
, right
,
1216 wret
= balance_node_right(trans
, root
,
1223 struct btrfs_disk_key disk_key
;
1225 btrfs_node_key(right
, &disk_key
, 0);
1226 btrfs_set_node_key(parent
, &disk_key
, pslot
+ 1);
1227 btrfs_mark_buffer_dirty(parent
);
1229 if (btrfs_header_nritems(mid
) <= orig_slot
) {
1230 path
->nodes
[level
] = right
;
1231 path
->slots
[level
+ 1] += 1;
1232 path
->slots
[level
] = orig_slot
-
1233 btrfs_header_nritems(mid
);
1234 btrfs_tree_unlock(mid
);
1235 free_extent_buffer(mid
);
1237 btrfs_tree_unlock(right
);
1238 free_extent_buffer(right
);
1242 btrfs_tree_unlock(right
);
1243 free_extent_buffer(right
);
1249 * readahead one full node of leaves, finding things that are close
1250 * to the block in 'slot', and triggering ra on them.
1252 static void reada_for_search(struct btrfs_root
*root
,
1253 struct btrfs_path
*path
,
1254 int level
, int slot
, u64 objectid
)
1256 struct extent_buffer
*node
;
1257 struct btrfs_disk_key disk_key
;
1263 int direction
= path
->reada
;
1264 struct extent_buffer
*eb
;
1272 if (!path
->nodes
[level
])
1275 node
= path
->nodes
[level
];
1277 search
= btrfs_node_blockptr(node
, slot
);
1278 blocksize
= btrfs_level_size(root
, level
- 1);
1279 eb
= btrfs_find_tree_block(root
, search
, blocksize
);
1281 free_extent_buffer(eb
);
1287 nritems
= btrfs_header_nritems(node
);
1291 if (direction
< 0) {
1295 } else if (direction
> 0) {
1300 if (path
->reada
< 0 && objectid
) {
1301 btrfs_node_key(node
, &disk_key
, nr
);
1302 if (btrfs_disk_key_objectid(&disk_key
) != objectid
)
1305 search
= btrfs_node_blockptr(node
, nr
);
1306 if ((search
<= target
&& target
- search
<= 65536) ||
1307 (search
> target
&& search
- target
<= 65536)) {
1308 gen
= btrfs_node_ptr_generation(node
, nr
);
1309 readahead_tree_block(root
, search
, blocksize
, gen
);
1313 if ((nread
> 65536 || nscan
> 32))
1319 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1322 static noinline
int reada_for_balance(struct btrfs_root
*root
,
1323 struct btrfs_path
*path
, int level
)
1327 struct extent_buffer
*parent
;
1328 struct extent_buffer
*eb
;
1335 parent
= path
->nodes
[level
+ 1];
1339 nritems
= btrfs_header_nritems(parent
);
1340 slot
= path
->slots
[level
+ 1];
1341 blocksize
= btrfs_level_size(root
, level
);
1344 block1
= btrfs_node_blockptr(parent
, slot
- 1);
1345 gen
= btrfs_node_ptr_generation(parent
, slot
- 1);
1346 eb
= btrfs_find_tree_block(root
, block1
, blocksize
);
1347 if (eb
&& btrfs_buffer_uptodate(eb
, gen
))
1349 free_extent_buffer(eb
);
1351 if (slot
+ 1 < nritems
) {
1352 block2
= btrfs_node_blockptr(parent
, slot
+ 1);
1353 gen
= btrfs_node_ptr_generation(parent
, slot
+ 1);
1354 eb
= btrfs_find_tree_block(root
, block2
, blocksize
);
1355 if (eb
&& btrfs_buffer_uptodate(eb
, gen
))
1357 free_extent_buffer(eb
);
1359 if (block1
|| block2
) {
1362 /* release the whole path */
1363 btrfs_release_path(path
);
1365 /* read the blocks */
1367 readahead_tree_block(root
, block1
, blocksize
, 0);
1369 readahead_tree_block(root
, block2
, blocksize
, 0);
1372 eb
= read_tree_block(root
, block1
, blocksize
, 0);
1373 free_extent_buffer(eb
);
1376 eb
= read_tree_block(root
, block2
, blocksize
, 0);
1377 free_extent_buffer(eb
);
1385 * when we walk down the tree, it is usually safe to unlock the higher layers
1386 * in the tree. The exceptions are when our path goes through slot 0, because
1387 * operations on the tree might require changing key pointers higher up in the
1390 * callers might also have set path->keep_locks, which tells this code to keep
1391 * the lock if the path points to the last slot in the block. This is part of
1392 * walking through the tree, and selecting the next slot in the higher block.
1394 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1395 * if lowest_unlock is 1, level 0 won't be unlocked
1397 static noinline
void unlock_up(struct btrfs_path
*path
, int level
,
1401 int skip_level
= level
;
1403 struct extent_buffer
*t
;
1405 for (i
= level
; i
< BTRFS_MAX_LEVEL
; i
++) {
1406 if (!path
->nodes
[i
])
1408 if (!path
->locks
[i
])
1410 if (!no_skips
&& path
->slots
[i
] == 0) {
1414 if (!no_skips
&& path
->keep_locks
) {
1417 nritems
= btrfs_header_nritems(t
);
1418 if (nritems
< 1 || path
->slots
[i
] >= nritems
- 1) {
1423 if (skip_level
< i
&& i
>= lowest_unlock
)
1427 if (i
>= lowest_unlock
&& i
> skip_level
&& path
->locks
[i
]) {
1428 btrfs_tree_unlock_rw(t
, path
->locks
[i
]);
/*
 * This releases any locks held in the path starting at level and
 * going all the way up to the root.
 *
 * btrfs_search_slot will keep the lock held on higher nodes in a few
 * corner cases, such as COW of the block at slot zero in the node.  This
 * ignores those rules, and it should only be called when there are no
 * more updates to be done higher up in the tree.
 */
noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
{
        int i;

        if (path->keep_locks)
                return;

        for (i = level; i < BTRFS_MAX_LEVEL; i++) {
                if (!path->nodes[i])
                        continue;
                if (!path->locks[i])
                        continue;
                btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
                path->locks[i] = 0;
        }
}
1461 * helper function for btrfs_search_slot. The goal is to find a block
1462 * in cache without setting the path to blocking. If we find the block
1463 * we return zero and the path is unchanged.
1465 * If we can't find the block, we set the path blocking and do some
1466 * reada. -EAGAIN is returned and the search must be repeated.
1469 read_block_for_search(struct btrfs_trans_handle
*trans
,
1470 struct btrfs_root
*root
, struct btrfs_path
*p
,
1471 struct extent_buffer
**eb_ret
, int level
, int slot
,
1472 struct btrfs_key
*key
)
1477 struct extent_buffer
*b
= *eb_ret
;
1478 struct extent_buffer
*tmp
;
1481 blocknr
= btrfs_node_blockptr(b
, slot
);
1482 gen
= btrfs_node_ptr_generation(b
, slot
);
1483 blocksize
= btrfs_level_size(root
, level
- 1);
1485 tmp
= btrfs_find_tree_block(root
, blocknr
, blocksize
);
1487 if (btrfs_buffer_uptodate(tmp
, 0)) {
1488 if (btrfs_buffer_uptodate(tmp
, gen
)) {
1490 * we found an up to date block without
1497 /* the pages were up to date, but we failed
1498 * the generation number check. Do a full
1499 * read for the generation number that is correct.
1500 * We must do this without dropping locks so
1501 * we can trust our generation number
1503 free_extent_buffer(tmp
);
1504 btrfs_set_path_blocking(p
);
1506 tmp
= read_tree_block(root
, blocknr
, blocksize
, gen
);
1507 if (tmp
&& btrfs_buffer_uptodate(tmp
, gen
)) {
1511 free_extent_buffer(tmp
);
1512 btrfs_release_path(p
);
1518 * reduce lock contention at high levels
1519 * of the btree by dropping locks before
1520 * we read. Don't release the lock on the current
1521 * level because we need to walk this node to figure
1522 * out which blocks to read.
1524 btrfs_unlock_up_safe(p
, level
+ 1);
1525 btrfs_set_path_blocking(p
);
1527 free_extent_buffer(tmp
);
1529 reada_for_search(root
, p
, level
, slot
, key
->objectid
);
1531 btrfs_release_path(p
);
1534 tmp
= read_tree_block(root
, blocknr
, blocksize
, 0);
1537 * If the read above didn't mark this buffer up to date,
1538 * it will never end up being up to date. Set ret to EIO now
1539 * and give up so that our caller doesn't loop forever
1542 if (!btrfs_buffer_uptodate(tmp
, 0))
1544 free_extent_buffer(tmp
);
1550 * helper function for btrfs_search_slot. This does all of the checks
1551 * for node-level blocks and does any balancing required based on
1554 * If no extra work was required, zero is returned. If we had to
1555 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1559 setup_nodes_for_search(struct btrfs_trans_handle
*trans
,
1560 struct btrfs_root
*root
, struct btrfs_path
*p
,
1561 struct extent_buffer
*b
, int level
, int ins_len
,
1562 int *write_lock_level
)
1565 if ((p
->search_for_split
|| ins_len
> 0) && btrfs_header_nritems(b
) >=
1566 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3) {
1569 if (*write_lock_level
< level
+ 1) {
1570 *write_lock_level
= level
+ 1;
1571 btrfs_release_path(p
);
1575 sret
= reada_for_balance(root
, p
, level
);
1579 btrfs_set_path_blocking(p
);
1580 sret
= split_node(trans
, root
, p
, level
);
1581 btrfs_clear_path_blocking(p
, NULL
, 0);
1588 b
= p
->nodes
[level
];
1589 } else if (ins_len
< 0 && btrfs_header_nritems(b
) <
1590 BTRFS_NODEPTRS_PER_BLOCK(root
) / 2) {
1593 if (*write_lock_level
< level
+ 1) {
1594 *write_lock_level
= level
+ 1;
1595 btrfs_release_path(p
);
1599 sret
= reada_for_balance(root
, p
, level
);
1603 btrfs_set_path_blocking(p
);
1604 sret
= balance_level(trans
, root
, p
, level
);
1605 btrfs_clear_path_blocking(p
, NULL
, 0);
1611 b
= p
->nodes
[level
];
1613 btrfs_release_path(p
);
1616 BUG_ON(btrfs_header_nritems(b
) == 1);
1627 * look for key in the tree. path is filled in with nodes along the way
1628 * if key is found, we return zero and you can find the item in the leaf
1629 * level of the path (level 0)
1631 * If the key isn't found, the path points to the slot where it should
1632 * be inserted, and 1 is returned. If there are other errors during the
1633 * search a negative error number is returned.
1635 * if ins_len > 0, nodes and leaves will be split as we walk down the
1636 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
1639 int btrfs_search_slot(struct btrfs_trans_handle
*trans
, struct btrfs_root
1640 *root
, struct btrfs_key
*key
, struct btrfs_path
*p
, int
1643 struct extent_buffer
*b
;
1648 int lowest_unlock
= 1;
1650 /* everything at write_lock_level or lower must be write locked */
1651 int write_lock_level
= 0;
1652 u8 lowest_level
= 0;
1654 lowest_level
= p
->lowest_level
;
1655 WARN_ON(lowest_level
&& ins_len
> 0);
1656 WARN_ON(p
->nodes
[0] != NULL
);
1661 /* when we are removing items, we might have to go up to level
1662 * two as we update tree pointers Make sure we keep write
1663 * for those levels as well
1665 write_lock_level
= 2;
1666 } else if (ins_len
> 0) {
1668 * for inserting items, make sure we have a write lock on
1669 * level 1 so we can update keys
1671 write_lock_level
= 1;
1675 write_lock_level
= -1;
1677 if (cow
&& (p
->keep_locks
|| p
->lowest_level
))
1678 write_lock_level
= BTRFS_MAX_LEVEL
;
1682 * we try very hard to do read locks on the root
1684 root_lock
= BTRFS_READ_LOCK
;
1686 if (p
->search_commit_root
) {
1688 * the commit roots are read only
1689 * so we always do read locks
1691 b
= root
->commit_root
;
1692 extent_buffer_get(b
);
1693 level
= btrfs_header_level(b
);
1694 if (!p
->skip_locking
)
1695 btrfs_tree_read_lock(b
);
1697 if (p
->skip_locking
) {
1698 b
= btrfs_root_node(root
);
1699 level
= btrfs_header_level(b
);
1701 /* we don't know the level of the root node
1702 * until we actually have it read locked
1704 b
= btrfs_read_lock_root_node(root
);
1705 level
= btrfs_header_level(b
);
1706 if (level
<= write_lock_level
) {
1707 /* whoops, must trade for write lock */
1708 btrfs_tree_read_unlock(b
);
1709 free_extent_buffer(b
);
1710 b
= btrfs_lock_root_node(root
);
1711 root_lock
= BTRFS_WRITE_LOCK
;
1713 /* the level might have changed, check again */
1714 level
= btrfs_header_level(b
);
1718 p
->nodes
[level
] = b
;
1719 if (!p
->skip_locking
)
1720 p
->locks
[level
] = root_lock
;
1723 level
= btrfs_header_level(b
);
1726 * setup the path here so we can release it under lock
1727 * contention with the cow code
1731 * if we don't really need to cow this block
1732 * then we don't want to set the path blocking,
1733 * so we test it here
1735 if (!should_cow_block(trans
, root
, b
))
1738 btrfs_set_path_blocking(p
);
1741 * must have write locks on this node and the
1744 if (level
+ 1 > write_lock_level
) {
1745 write_lock_level
= level
+ 1;
1746 btrfs_release_path(p
);
1750 err
= btrfs_cow_block(trans
, root
, b
,
1751 p
->nodes
[level
+ 1],
1752 p
->slots
[level
+ 1], &b
);
1759 BUG_ON(!cow
&& ins_len
);
1761 p
->nodes
[level
] = b
;
1762 btrfs_clear_path_blocking(p
, NULL
, 0);
1765 * we have a lock on b and as long as we aren't changing
1766 * the tree, there is no way to for the items in b to change.
1767 * It is safe to drop the lock on our parent before we
1768 * go through the expensive btree search on b.
1770 * If cow is true, then we might be changing slot zero,
1771 * which may require changing the parent. So, we can't
1772 * drop the lock until after we know which slot we're
1776 btrfs_unlock_up_safe(p
, level
+ 1);
1778 ret
= bin_search(b
, key
, level
, &slot
);
1782 if (ret
&& slot
> 0) {
1786 p
->slots
[level
] = slot
;
1787 err
= setup_nodes_for_search(trans
, root
, p
, b
, level
,
1788 ins_len
, &write_lock_level
);
1795 b
= p
->nodes
[level
];
1796 slot
= p
->slots
[level
];
1799 * slot 0 is special, if we change the key
1800 * we have to update the parent pointer
1801 * which means we must have a write lock
1804 if (slot
== 0 && cow
&&
1805 write_lock_level
< level
+ 1) {
1806 write_lock_level
= level
+ 1;
1807 btrfs_release_path(p
);
1811 unlock_up(p
, level
, lowest_unlock
);
1813 if (level
== lowest_level
) {
1819 err
= read_block_for_search(trans
, root
, p
,
1820 &b
, level
, slot
, key
);
1828 if (!p
->skip_locking
) {
1829 level
= btrfs_header_level(b
);
1830 if (level
<= write_lock_level
) {
1831 err
= btrfs_try_tree_write_lock(b
);
1833 btrfs_set_path_blocking(p
);
1835 btrfs_clear_path_blocking(p
, b
,
1838 p
->locks
[level
] = BTRFS_WRITE_LOCK
;
1840 err
= btrfs_try_tree_read_lock(b
);
1842 btrfs_set_path_blocking(p
);
1843 btrfs_tree_read_lock(b
);
1844 btrfs_clear_path_blocking(p
, b
,
1847 p
->locks
[level
] = BTRFS_READ_LOCK
;
1849 p
->nodes
[level
] = b
;
1852 p
->slots
[level
] = slot
;
1854 btrfs_leaf_free_space(root
, b
) < ins_len
) {
1855 if (write_lock_level
< 1) {
1856 write_lock_level
= 1;
1857 btrfs_release_path(p
);
1861 btrfs_set_path_blocking(p
);
1862 err
= split_leaf(trans
, root
, key
,
1863 p
, ins_len
, ret
== 0);
1864 btrfs_clear_path_blocking(p
, NULL
, 0);
1872 if (!p
->search_for_split
)
1873 unlock_up(p
, level
, lowest_unlock
);
1880 * we don't really know what they plan on doing with the path
1881 * from here on, so for now just mark it as blocking
1883 if (!p
->leave_spinning
)
1884 btrfs_set_path_blocking(p
);
1886 btrfs_release_path(p
);
/*
 * adjust the pointers going up the tree, starting at level
 * making sure the right key of each node points to 'key'.
 * This is used after shifting pointers to the left, so it stops
 * fixing up pointers when a given leaf/node is not in slot 0 of the
 * higher levels
 *
 * If this fails to write a tree block, it returns -1, but continues
 * fixing up the blocks in ram so the tree is consistent.
 */
static int fixup_low_keys(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, struct btrfs_path *path,
                          struct btrfs_disk_key *key, int level)
{
        int i;
        struct extent_buffer *t;

        for (i = level; i < BTRFS_MAX_LEVEL; i++) {
                int tslot = path->slots[i];
                if (!path->nodes[i])
                        break;
                t = path->nodes[i];
                btrfs_set_node_key(t, key, tslot);
                btrfs_mark_buffer_dirty(path->nodes[i]);
                if (tslot != 0)
                        break;
        }
        return 0;
}
/*
 * This function isn't completely safe. It's the caller's responsibility
 * that the new key won't break the order
 */
int btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, struct btrfs_path *path,
                            struct btrfs_key *new_key)
{
        struct btrfs_disk_key disk_key;
        struct extent_buffer *eb;
        int slot;

        eb = path->nodes[0];
        slot = path->slots[0];
        if (slot > 0) {
                btrfs_item_key(eb, &disk_key, slot - 1);
                if (comp_keys(&disk_key, new_key) >= 0)
                        return -1;
        }
        if (slot < btrfs_header_nritems(eb) - 1) {
                btrfs_item_key(eb, &disk_key, slot + 1);
                if (comp_keys(&disk_key, new_key) <= 0)
                        return -1;
        }

        btrfs_cpu_key_to_disk(&disk_key, new_key);
        btrfs_set_item_key(eb, &disk_key, slot);
        btrfs_mark_buffer_dirty(eb);
        if (slot == 0)
                fixup_low_keys(trans, root, path, &disk_key, 1);
        return 0;
}
1957 * try to push data from one node into the next node left in the
1960 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1961 * error, and > 0 if there was no room in the left hand block.
1963 static int push_node_left(struct btrfs_trans_handle
*trans
,
1964 struct btrfs_root
*root
, struct extent_buffer
*dst
,
1965 struct extent_buffer
*src
, int empty
)
1972 src_nritems
= btrfs_header_nritems(src
);
1973 dst_nritems
= btrfs_header_nritems(dst
);
1974 push_items
= BTRFS_NODEPTRS_PER_BLOCK(root
) - dst_nritems
;
1975 WARN_ON(btrfs_header_generation(src
) != trans
->transid
);
1976 WARN_ON(btrfs_header_generation(dst
) != trans
->transid
);
1978 if (!empty
&& src_nritems
<= 8)
1981 if (push_items
<= 0)
1985 push_items
= min(src_nritems
, push_items
);
1986 if (push_items
< src_nritems
) {
1987 /* leave at least 8 pointers in the node if
1988 * we aren't going to empty it
1990 if (src_nritems
- push_items
< 8) {
1991 if (push_items
<= 8)
1997 push_items
= min(src_nritems
- 8, push_items
);
1999 copy_extent_buffer(dst
, src
,
2000 btrfs_node_key_ptr_offset(dst_nritems
),
2001 btrfs_node_key_ptr_offset(0),
2002 push_items
* sizeof(struct btrfs_key_ptr
));
2004 if (push_items
< src_nritems
) {
2005 memmove_extent_buffer(src
, btrfs_node_key_ptr_offset(0),
2006 btrfs_node_key_ptr_offset(push_items
),
2007 (src_nritems
- push_items
) *
2008 sizeof(struct btrfs_key_ptr
));
2010 btrfs_set_header_nritems(src
, src_nritems
- push_items
);
2011 btrfs_set_header_nritems(dst
, dst_nritems
+ push_items
);
2012 btrfs_mark_buffer_dirty(src
);
2013 btrfs_mark_buffer_dirty(dst
);
2019 * try to push data from one node into the next node right in the
2022 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2023 * error, and > 0 if there was no room in the right hand block.
2025 * this will only push up to 1/2 the contents of the left node over
2027 static int balance_node_right(struct btrfs_trans_handle
*trans
,
2028 struct btrfs_root
*root
,
2029 struct extent_buffer
*dst
,
2030 struct extent_buffer
*src
)
2038 WARN_ON(btrfs_header_generation(src
) != trans
->transid
);
2039 WARN_ON(btrfs_header_generation(dst
) != trans
->transid
);
2041 src_nritems
= btrfs_header_nritems(src
);
2042 dst_nritems
= btrfs_header_nritems(dst
);
2043 push_items
= BTRFS_NODEPTRS_PER_BLOCK(root
) - dst_nritems
;
2044 if (push_items
<= 0)
2047 if (src_nritems
< 4)
2050 max_push
= src_nritems
/ 2 + 1;
2051 /* don't try to empty the node */
2052 if (max_push
>= src_nritems
)
2055 if (max_push
< push_items
)
2056 push_items
= max_push
;
2058 memmove_extent_buffer(dst
, btrfs_node_key_ptr_offset(push_items
),
2059 btrfs_node_key_ptr_offset(0),
2061 sizeof(struct btrfs_key_ptr
));
2063 copy_extent_buffer(dst
, src
,
2064 btrfs_node_key_ptr_offset(0),
2065 btrfs_node_key_ptr_offset(src_nritems
- push_items
),
2066 push_items
* sizeof(struct btrfs_key_ptr
));
2068 btrfs_set_header_nritems(src
, src_nritems
- push_items
);
2069 btrfs_set_header_nritems(dst
, dst_nritems
+ push_items
);
2071 btrfs_mark_buffer_dirty(src
);
2072 btrfs_mark_buffer_dirty(dst
);
2078 * helper function to insert a new root level in the tree.
2079 * A new node is allocated, and a single item is inserted to
2080 * point to the existing root
2082 * returns zero on success or < 0 on failure.
2084 static noinline
int insert_new_root(struct btrfs_trans_handle
*trans
,
2085 struct btrfs_root
*root
,
2086 struct btrfs_path
*path
, int level
)
2089 struct extent_buffer
*lower
;
2090 struct extent_buffer
*c
;
2091 struct extent_buffer
*old
;
2092 struct btrfs_disk_key lower_key
;
2094 BUG_ON(path
->nodes
[level
]);
2095 BUG_ON(path
->nodes
[level
-1] != root
->node
);
2097 lower
= path
->nodes
[level
-1];
2099 btrfs_item_key(lower
, &lower_key
, 0);
2101 btrfs_node_key(lower
, &lower_key
, 0);
2103 c
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
2104 root
->root_key
.objectid
, &lower_key
,
2105 level
, root
->node
->start
, 0, 0);
2109 root_add_used(root
, root
->nodesize
);
2111 memset_extent_buffer(c
, 0, 0, sizeof(struct btrfs_header
));
2112 btrfs_set_header_nritems(c
, 1);
2113 btrfs_set_header_level(c
, level
);
2114 btrfs_set_header_bytenr(c
, c
->start
);
2115 btrfs_set_header_generation(c
, trans
->transid
);
2116 btrfs_set_header_backref_rev(c
, BTRFS_MIXED_BACKREF_REV
);
2117 btrfs_set_header_owner(c
, root
->root_key
.objectid
);
2119 write_extent_buffer(c
, root
->fs_info
->fsid
,
2120 (unsigned long)btrfs_header_fsid(c
),
2123 write_extent_buffer(c
, root
->fs_info
->chunk_tree_uuid
,
2124 (unsigned long)btrfs_header_chunk_tree_uuid(c
),
2127 btrfs_set_node_key(c
, &lower_key
, 0);
2128 btrfs_set_node_blockptr(c
, 0, lower
->start
);
2129 lower_gen
= btrfs_header_generation(lower
);
2130 WARN_ON(lower_gen
!= trans
->transid
);
2132 btrfs_set_node_ptr_generation(c
, 0, lower_gen
);
2134 btrfs_mark_buffer_dirty(c
);
2137 rcu_assign_pointer(root
->node
, c
);
2139 /* the super has an extra ref to root->node */
2140 free_extent_buffer(old
);
2142 add_root_to_dirty_list(root
);
2143 extent_buffer_get(c
);
2144 path
->nodes
[level
] = c
;
2145 path
->locks
[level
] = BTRFS_WRITE_LOCK
;
2146 path
->slots
[level
] = 0;
/*
 * worker function to insert a single pointer in a node.
 * the node should have enough room for the pointer already
 *
 * slot and level indicate where you want the key to go, and
 * blocknr is the block the key points to.
 *
 * returns zero on success and < 0 on any error
 */
static int insert_ptr(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root, struct btrfs_path *path, struct btrfs_disk_key
                      *key, u64 bytenr, int slot, int level)
{
        struct extent_buffer *lower;
        int nritems;

        BUG_ON(!path->nodes[level]);
        btrfs_assert_tree_locked(path->nodes[level]);
        lower = path->nodes[level];
        nritems = btrfs_header_nritems(lower);
        BUG_ON(slot > nritems);
        if (nritems == BTRFS_NODEPTRS_PER_BLOCK(root))
                BUG();
        if (slot != nritems) {
                memmove_extent_buffer(lower,
                              btrfs_node_key_ptr_offset(slot + 1),
                              btrfs_node_key_ptr_offset(slot),
                              (nritems - slot) * sizeof(struct btrfs_key_ptr));
        }
        btrfs_set_node_key(lower, key, slot);
        btrfs_set_node_blockptr(lower, slot, bytenr);
        WARN_ON(trans->transid == 0);
        btrfs_set_node_ptr_generation(lower, slot, trans->transid);
        btrfs_set_header_nritems(lower, nritems + 1);
        btrfs_mark_buffer_dirty(lower);
        return 0;
}
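/*
 * Example: inserting into slot 2 of a node that currently holds 5 pointers
 * shifts pointers 2..4 up by one position (the memmove above), writes the new
 * key, block pointer and generation into slot 2 and bumps nritems to 6.
 * Inserting at slot == nritems appends without moving anything.
 */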
2189 * split the node at the specified level in path in two.
2190 * The path is corrected to point to the appropriate node after the split
2192 * Before splitting this tries to make some room in the node by pushing
2193 * left and right, if either one works, it returns right away.
2195 * returns 0 on success and < 0 on failure
2197 static noinline
int split_node(struct btrfs_trans_handle
*trans
,
2198 struct btrfs_root
*root
,
2199 struct btrfs_path
*path
, int level
)
2201 struct extent_buffer
*c
;
2202 struct extent_buffer
*split
;
2203 struct btrfs_disk_key disk_key
;
2209 c
= path
->nodes
[level
];
2210 WARN_ON(btrfs_header_generation(c
) != trans
->transid
);
2211 if (c
== root
->node
) {
2212 /* trying to split the root, lets make a new one */
2213 ret
= insert_new_root(trans
, root
, path
, level
+ 1);
2217 ret
= push_nodes_for_insert(trans
, root
, path
, level
);
2218 c
= path
->nodes
[level
];
2219 if (!ret
&& btrfs_header_nritems(c
) <
2220 BTRFS_NODEPTRS_PER_BLOCK(root
) - 3)
2226 c_nritems
= btrfs_header_nritems(c
);
2227 mid
= (c_nritems
+ 1) / 2;
2228 btrfs_node_key(c
, &disk_key
, mid
);
2230 split
= btrfs_alloc_free_block(trans
, root
, root
->nodesize
, 0,
2231 root
->root_key
.objectid
,
2232 &disk_key
, level
, c
->start
, 0, 0);
2234 return PTR_ERR(split
);
2236 root_add_used(root
, root
->nodesize
);
2238 memset_extent_buffer(split
, 0, 0, sizeof(struct btrfs_header
));
2239 btrfs_set_header_level(split
, btrfs_header_level(c
));
2240 btrfs_set_header_bytenr(split
, split
->start
);
2241 btrfs_set_header_generation(split
, trans
->transid
);
2242 btrfs_set_header_backref_rev(split
, BTRFS_MIXED_BACKREF_REV
);
2243 btrfs_set_header_owner(split
, root
->root_key
.objectid
);
2244 write_extent_buffer(split
, root
->fs_info
->fsid
,
2245 (unsigned long)btrfs_header_fsid(split
),
2247 write_extent_buffer(split
, root
->fs_info
->chunk_tree_uuid
,
2248 (unsigned long)btrfs_header_chunk_tree_uuid(split
),
2252 copy_extent_buffer(split
, c
,
2253 btrfs_node_key_ptr_offset(0),
2254 btrfs_node_key_ptr_offset(mid
),
2255 (c_nritems
- mid
) * sizeof(struct btrfs_key_ptr
));
2256 btrfs_set_header_nritems(split
, c_nritems
- mid
);
2257 btrfs_set_header_nritems(c
, mid
);
2260 btrfs_mark_buffer_dirty(c
);
2261 btrfs_mark_buffer_dirty(split
);
2263 wret
= insert_ptr(trans
, root
, path
, &disk_key
, split
->start
,
2264 path
->slots
[level
+ 1] + 1,
2269 if (path
->slots
[level
] >= mid
) {
2270 path
->slots
[level
] -= mid
;
2271 btrfs_tree_unlock(c
);
2272 free_extent_buffer(c
);
2273 path
->nodes
[level
] = split
;
2274 path
->slots
[level
+ 1] += 1;
2276 btrfs_tree_unlock(split
);
2277 free_extent_buffer(split
);
/*
 * how many bytes are required to store the items in a leaf.  start
 * and nr indicate which items in the leaf to check.  This totals up the
 * space used both by the item structs and the item data
 */
static int leaf_space_used(struct extent_buffer *l, int start, int nr)
{
        int data_len;
        int nritems = btrfs_header_nritems(l);
        int end = min(nritems, start + nr) - 1;

        if (!nr)
                return 0;
        data_len = btrfs_item_end_nr(l, start);
        data_len = data_len - btrfs_item_offset_nr(l, end);
        data_len += sizeof(struct btrfs_item) * nr;
        WARN_ON(data_len < 0);
        return data_len;
}

/*
 * The space between the end of the leaf items and
 * the start of the leaf data.  IOW, how much room
 * the leaf has left for both items and data
 */
noinline int btrfs_leaf_free_space(struct btrfs_root *root,
                                   struct extent_buffer *leaf)
{
        int nritems = btrfs_header_nritems(leaf);
        int ret;

        ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
        if (ret < 0) {
                printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
                       "used %d nritems %d\n",
                       ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
                       leaf_space_used(leaf, 0, nritems), nritems);
        }
        return ret;
}
2323 * min slot controls the lowest index we're willing to push to the
2324 * right. We'll push up to and including min_slot, but no lower
2326 static noinline
int __push_leaf_right(struct btrfs_trans_handle
*trans
,
2327 struct btrfs_root
*root
,
2328 struct btrfs_path
*path
,
2329 int data_size
, int empty
,
2330 struct extent_buffer
*right
,
2331 int free_space
, u32 left_nritems
,
2334 struct extent_buffer
*left
= path
->nodes
[0];
2335 struct extent_buffer
*upper
= path
->nodes
[1];
2336 struct btrfs_disk_key disk_key
;
2341 struct btrfs_item
*item
;
2350 nr
= max_t(u32
, 1, min_slot
);
2352 if (path
->slots
[0] >= left_nritems
)
2353 push_space
+= data_size
;
2355 slot
= path
->slots
[1];
2356 i
= left_nritems
- 1;
2358 item
= btrfs_item_nr(left
, i
);
2360 if (!empty
&& push_items
> 0) {
2361 if (path
->slots
[0] > i
)
2363 if (path
->slots
[0] == i
) {
2364 int space
= btrfs_leaf_free_space(root
, left
);
2365 if (space
+ push_space
* 2 > free_space
)
2370 if (path
->slots
[0] == i
)
2371 push_space
+= data_size
;
2373 this_item_size
= btrfs_item_size(left
, item
);
2374 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
2378 push_space
+= this_item_size
+ sizeof(*item
);
2384 if (push_items
== 0)
2387 if (!empty
&& push_items
== left_nritems
)
2390 /* push left to right */
2391 right_nritems
= btrfs_header_nritems(right
);
2393 push_space
= btrfs_item_end_nr(left
, left_nritems
- push_items
);
2394 push_space
-= leaf_data_end(root
, left
);
2396 /* make room in the right data area */
2397 data_end
= leaf_data_end(root
, right
);
2398 memmove_extent_buffer(right
,
2399 btrfs_leaf_data(right
) + data_end
- push_space
,
2400 btrfs_leaf_data(right
) + data_end
,
2401 BTRFS_LEAF_DATA_SIZE(root
) - data_end
);
2403 /* copy from the left data area */
2404 copy_extent_buffer(right
, left
, btrfs_leaf_data(right
) +
2405 BTRFS_LEAF_DATA_SIZE(root
) - push_space
,
2406 btrfs_leaf_data(left
) + leaf_data_end(root
, left
),
2409 memmove_extent_buffer(right
, btrfs_item_nr_offset(push_items
),
2410 btrfs_item_nr_offset(0),
2411 right_nritems
* sizeof(struct btrfs_item
));
2413 /* copy the items from left to right */
2414 copy_extent_buffer(right
, left
, btrfs_item_nr_offset(0),
2415 btrfs_item_nr_offset(left_nritems
- push_items
),
2416 push_items
* sizeof(struct btrfs_item
));
2418 /* update the item pointers */
2419 right_nritems
+= push_items
;
2420 btrfs_set_header_nritems(right
, right_nritems
);
2421 push_space
= BTRFS_LEAF_DATA_SIZE(root
);
2422 for (i
= 0; i
< right_nritems
; i
++) {
2423 item
= btrfs_item_nr(right
, i
);
2424 push_space
-= btrfs_item_size(right
, item
);
2425 btrfs_set_item_offset(right
, item
, push_space
);
2428 left_nritems
-= push_items
;
2429 btrfs_set_header_nritems(left
, left_nritems
);
2432 btrfs_mark_buffer_dirty(left
);
2434 clean_tree_block(trans
, root
, left
);
2436 btrfs_mark_buffer_dirty(right
);
2438 btrfs_item_key(right
, &disk_key
, 0);
2439 btrfs_set_node_key(upper
, &disk_key
, slot
+ 1);
2440 btrfs_mark_buffer_dirty(upper
);
2442 /* then fixup the leaf pointer in the path */
2443 if (path
->slots
[0] >= left_nritems
) {
2444 path
->slots
[0] -= left_nritems
;
2445 if (btrfs_header_nritems(path
->nodes
[0]) == 0)
2446 clean_tree_block(trans
, root
, path
->nodes
[0]);
2447 btrfs_tree_unlock(path
->nodes
[0]);
2448 free_extent_buffer(path
->nodes
[0]);
2449 path
->nodes
[0] = right
;
2450 path
->slots
[1] += 1;
2452 btrfs_tree_unlock(right
);
2453 free_extent_buffer(right
);
2458 btrfs_tree_unlock(right
);
2459 free_extent_buffer(right
);
/*
 * push some data in the path leaf to the right, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * returns 1 if the push failed because the other node didn't have enough
 * room, 0 if everything worked out and < 0 if there were major errors.
 *
 * this will push starting from min_slot to the end of the leaf.  It won't
 * push any slot lower than min_slot
 */
static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
			   *root, struct btrfs_path *path,
			   int min_data_size, int data_size,
			   int empty, u32 min_slot)
{
	struct extent_buffer *left = path->nodes[0];
	struct extent_buffer *right;
	struct extent_buffer *upper;
	int slot;
	int free_space;
	u32 left_nritems;
	int ret;

	if (!path->nodes[1])
		return 1;

	slot = path->slots[1];
	upper = path->nodes[1];
	if (slot >= btrfs_header_nritems(upper) - 1)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	right = read_node_slot(root, upper, slot + 1);
	if (right == NULL)
		return 1;

	btrfs_tree_lock(right);
	btrfs_set_lock_blocking(right);

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, right, upper,
			      slot + 1, &right);
	if (ret)
		goto out_unlock;

	free_space = btrfs_leaf_free_space(root, right);
	if (free_space < data_size)
		goto out_unlock;

	left_nritems = btrfs_header_nritems(left);
	if (left_nritems == 0)
		goto out_unlock;

	return __push_leaf_right(trans, root, path, min_data_size, empty,
				 right, free_space, left_nritems, min_slot);
out_unlock:
	btrfs_tree_unlock(right);
	free_extent_buffer(right);
	return 1;
}
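/*
 * Added note (illustration only, not part of the original file): this
 * helper follows a check / cow / re-check pattern because btrfs_cow_block()
 * can relocate the right leaf, so the free space is only trusted once we
 * hold the cowed copy.  A condensed sketch of how split_leaf() further down
 * consumes the 1 / 0 / negative return convention:
 *
 *	wret = push_leaf_right(trans, root, path, data_size,
 *			       data_size, 0, 0);
 *	if (wret < 0)
 *		return wret;
 *	if (wret) {
 *		wret = push_leaf_left(trans, root, path, data_size,
 *				      data_size, 0, (u32)-1);
 *		if (wret < 0)
 *			return wret;
 *	}
 *	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
 *		return 0;	// the pushes freed enough room, no split
 */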
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us do all the
 * items
 */
static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int data_size,
				     int empty, struct extent_buffer *left,
				     int free_space, u32 right_nritems,
				     u32 max_slot)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *right = path->nodes[0];
	int i;
	int push_space = 0;
	int push_items = 0;
	struct btrfs_item *item;
	u32 old_left_nritems;
	u32 nr;
	int ret = 0;
	int wret;
	u32 this_item_size;
	u32 old_left_item_size;

	if (empty)
		nr = min(right_nritems, max_slot);
	else
		nr = min(right_nritems - 1, max_slot);
2562 for (i
= 0; i
< nr
; i
++) {
2563 item
= btrfs_item_nr(right
, i
);
2565 if (!empty
&& push_items
> 0) {
2566 if (path
->slots
[0] < i
)
2568 if (path
->slots
[0] == i
) {
2569 int space
= btrfs_leaf_free_space(root
, right
);
2570 if (space
+ push_space
* 2 > free_space
)
2575 if (path
->slots
[0] == i
)
2576 push_space
+= data_size
;
2578 this_item_size
= btrfs_item_size(right
, item
);
2579 if (this_item_size
+ sizeof(*item
) + push_space
> free_space
)
2583 push_space
+= this_item_size
+ sizeof(*item
);
2586 if (push_items
== 0) {
2590 if (!empty
&& push_items
== btrfs_header_nritems(right
))
2593 /* push data from right to left */
2594 copy_extent_buffer(left
, right
,
2595 btrfs_item_nr_offset(btrfs_header_nritems(left
)),
2596 btrfs_item_nr_offset(0),
2597 push_items
* sizeof(struct btrfs_item
));
2599 push_space
= BTRFS_LEAF_DATA_SIZE(root
) -
2600 btrfs_item_offset_nr(right
, push_items
- 1);
2602 copy_extent_buffer(left
, right
, btrfs_leaf_data(left
) +
2603 leaf_data_end(root
, left
) - push_space
,
2604 btrfs_leaf_data(right
) +
2605 btrfs_item_offset_nr(right
, push_items
- 1),
2607 old_left_nritems
= btrfs_header_nritems(left
);
2608 BUG_ON(old_left_nritems
<= 0);
2610 old_left_item_size
= btrfs_item_offset_nr(left
, old_left_nritems
- 1);
2611 for (i
= old_left_nritems
; i
< old_left_nritems
+ push_items
; i
++) {
2614 item
= btrfs_item_nr(left
, i
);
2616 ioff
= btrfs_item_offset(left
, item
);
2617 btrfs_set_item_offset(left
, item
,
2618 ioff
- (BTRFS_LEAF_DATA_SIZE(root
) - old_left_item_size
));
2620 btrfs_set_header_nritems(left
, old_left_nritems
+ push_items
);
2622 /* fixup right node */
2623 if (push_items
> right_nritems
) {
2624 printk(KERN_CRIT
"push items %d nr %u\n", push_items
,
2629 if (push_items
< right_nritems
) {
2630 push_space
= btrfs_item_offset_nr(right
, push_items
- 1) -
2631 leaf_data_end(root
, right
);
2632 memmove_extent_buffer(right
, btrfs_leaf_data(right
) +
2633 BTRFS_LEAF_DATA_SIZE(root
) - push_space
,
2634 btrfs_leaf_data(right
) +
2635 leaf_data_end(root
, right
), push_space
);
2637 memmove_extent_buffer(right
, btrfs_item_nr_offset(0),
2638 btrfs_item_nr_offset(push_items
),
2639 (btrfs_header_nritems(right
) - push_items
) *
2640 sizeof(struct btrfs_item
));
2642 right_nritems
-= push_items
;
2643 btrfs_set_header_nritems(right
, right_nritems
);
2644 push_space
= BTRFS_LEAF_DATA_SIZE(root
);
2645 for (i
= 0; i
< right_nritems
; i
++) {
2646 item
= btrfs_item_nr(right
, i
);
2648 push_space
= push_space
- btrfs_item_size(right
, item
);
2649 btrfs_set_item_offset(right
, item
, push_space
);
2652 btrfs_mark_buffer_dirty(left
);
2654 btrfs_mark_buffer_dirty(right
);
2656 clean_tree_block(trans
, root
, right
);
2658 btrfs_item_key(right
, &disk_key
, 0);
2659 wret
= fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
2663 /* then fixup the leaf pointer in the path */
2664 if (path
->slots
[0] < push_items
) {
2665 path
->slots
[0] += old_left_nritems
;
2666 btrfs_tree_unlock(path
->nodes
[0]);
2667 free_extent_buffer(path
->nodes
[0]);
2668 path
->nodes
[0] = left
;
2669 path
->slots
[1] -= 1;
2671 btrfs_tree_unlock(left
);
2672 free_extent_buffer(left
);
2673 path
->slots
[0] -= push_items
;
2675 BUG_ON(path
->slots
[0] < 0);
2678 btrfs_tree_unlock(left
);
2679 free_extent_buffer(left
);
/*
 * push some data in the path leaf to the left, trying to free up at
 * least data_size bytes.  returns zero if the push worked, nonzero otherwise
 *
 * max_slot can put a limit on how far into the leaf we'll push items.  The
 * item at 'max_slot' won't be touched.  Use (u32)-1 to make us push all the
 * items
 */
static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
			  *root, struct btrfs_path *path, int min_data_size,
			  int data_size, int empty, u32 max_slot)
{
	struct extent_buffer *right = path->nodes[0];
	struct extent_buffer *left;
	int slot;
	int free_space;
	u32 right_nritems;
	int ret = 0;

	slot = path->slots[1];
	if (slot == 0)
		return 1;
	if (!path->nodes[1])
		return 1;

	right_nritems = btrfs_header_nritems(right);
	if (right_nritems == 0)
		return 1;

	btrfs_assert_tree_locked(path->nodes[1]);

	left = read_node_slot(root, path->nodes[1], slot - 1);
	if (left == NULL)
		return 1;

	btrfs_tree_lock(left);
	btrfs_set_lock_blocking(left);

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	/* cow and double check */
	ret = btrfs_cow_block(trans, root, left,
			      path->nodes[1], slot - 1, &left);
	if (ret) {
		/* we hit -ENOSPC, but it isn't fatal here */
		ret = 1;
		goto out;
	}

	free_space = btrfs_leaf_free_space(root, left);
	if (free_space < data_size) {
		ret = 1;
		goto out;
	}

	return __push_leaf_left(trans, root, path, min_data_size,
				empty, left, free_space, right_nritems,
				max_slot);
out:
	btrfs_tree_unlock(left);
	free_extent_buffer(left);
	return ret;
}
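/*
 * Added note (illustration only, not part of the original file): the two
 * push directions take mirrored limits.  push_leaf_right(..., min_slot)
 * will not move any slot below min_slot, while push_leaf_left(..., max_slot)
 * leaves the item at max_slot and everything after it alone.  For a leaf
 * with slots 0-9 and a target slot of 4, push_for_double_split() below
 * issues roughly
 *
 *	push_leaf_right(trans, root, path, 1, data_size, 0, 4);
 *	push_leaf_left(trans, root, path, 1, data_size, 0, 4);
 *
 * (re-reading the slot between the calls), so slots 4-9 may migrate right
 * and slots 0-3 may migrate left, ideally leaving the target at one end of
 * its leaf.
 */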
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int copy_for_split(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *l,
				   struct extent_buffer *right,
				   int slot, int mid, int nritems)
{
	int data_copy_size;
	int rt_data_off;
	int i;
	int ret = 0;
	int wret;
	struct btrfs_disk_key disk_key;

	nritems = nritems - mid;
	btrfs_set_header_nritems(right, nritems);
	data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);

	copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
			   btrfs_item_nr_offset(mid),
			   nritems * sizeof(struct btrfs_item));

	copy_extent_buffer(right, l,
		     btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
		     data_copy_size, btrfs_leaf_data(l) +
		     leaf_data_end(root, l), data_copy_size);

	rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
		      btrfs_item_end_nr(l, mid);

	for (i = 0; i < nritems; i++) {
		struct btrfs_item *item = btrfs_item_nr(right, i);
		u32 ioff;

		ioff = btrfs_item_offset(right, item);
		btrfs_set_item_offset(right, item, ioff + rt_data_off);
	}

	btrfs_set_header_nritems(l, mid);
	btrfs_item_key(right, &disk_key, 0);
	wret = insert_ptr(trans, root, path, &disk_key, right->start,
			  path->slots[1] + 1, 1);
	if (wret)
		ret = wret;

	btrfs_mark_buffer_dirty(right);
	btrfs_mark_buffer_dirty(l);
	BUG_ON(path->slots[0] != slot);

	if (mid <= slot) {
		btrfs_tree_unlock(path->nodes[0]);
		free_extent_buffer(path->nodes[0]);
		path->nodes[0] = right;
		path->slots[0] -= mid;
		path->slots[1] += 1;
	} else {
		btrfs_tree_unlock(right);
		free_extent_buffer(right);
	}

	BUG_ON(path->slots[0] < 0);

	return ret;
}
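/*
 * Added worked example (not part of the original file): copy_for_split()
 * rebases every copied item offset by rt_data_off because offsets are
 * measured from the start of the leaf data area and the copied payload is
 * packed against the end of the new leaf.  Assuming a 4096 byte data area
 * where item 'mid' ends at offset 2500 in the old leaf,
 *
 *	rt_data_off = 4096 - 2500 = 1596
 *
 * so an item whose data began at offset 1800 now begins at 1800 + 1596 =
 * 3396 in the right-hand leaf.
 */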
/*
 * double splits happen when we need to insert a big item in the middle
 * of a leaf.  A double split can leave us with 3 mostly empty leaves:
 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
 *
 * We avoid this by trying to push the items on either side of our target
 * into the adjacent leaves.  If all goes well we can avoid the double split
 * completely.
 */
static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  int data_size)
{
	int ret;
	int progress = 0;
	int slot;
	u32 nritems;

	slot = path->slots[0];

	/*
	 * try to push all the items after our slot into the
	 * right leaf
	 */
	ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * our goal is to get our slot at the start or end of a leaf.  If
	 * we've done so we're done
	 */
	if (path->slots[0] == 0 || path->slots[0] == nritems)
		return 0;

	if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
		return 0;

	/* try to push all the items before our slot into the next leaf */
	slot = path->slots[0];
	ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
	if (ret < 0)
		return ret;

	if (ret == 0)
		progress++;

	if (progress)
		return 0;
	return 1;
}
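/*
 * Added illustration (not part of the original file): without this helper,
 * inserting a large item into the middle of a full leaf can force two
 * splits, e.g.
 *
 *	before: [ A0 .. An | target | B0 .. Bm ]
 *	after:  [ A0 .. An ]   [ target ]   [ B0 .. Bm ]
 *
 * leaving three half-empty leaves.  Pushing the A items into the left
 * neighbour and the B items into the right neighbour first usually leaves
 * the target at one end of its leaf, so one ordinary split (or none at all)
 * is enough.
 */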
/*
 * split the path's leaf in two, making sure there is at least data_size
 * available for the resulting leaf level of the path.
 *
 * returns 0 if all went well and < 0 on failure.
 */
static noinline int split_leaf(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_key *ins_key,
			       struct btrfs_path *path, int data_size,
			       int extend)
{
	struct btrfs_disk_key disk_key;
	struct extent_buffer *l;
	u32 nritems;
	int mid;
	int slot;
	struct extent_buffer *right;
	int ret = 0;
	int wret;
	int split;
	int num_doubles = 0;
	int tried_avoid_double = 0;

	l = path->nodes[0];
	slot = path->slots[0];
	if (extend && data_size + btrfs_item_size_nr(l, slot) +
	    sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
		return -EOVERFLOW;
2911 /* first try to make some room by pushing left and right */
2913 wret
= push_leaf_right(trans
, root
, path
, data_size
,
2918 wret
= push_leaf_left(trans
, root
, path
, data_size
,
2919 data_size
, 0, (u32
)-1);
2925 /* did the pushes work? */
2926 if (btrfs_leaf_free_space(root
, l
) >= data_size
)
2930 if (!path
->nodes
[1]) {
2931 ret
= insert_new_root(trans
, root
, path
, 1);
2938 slot
= path
->slots
[0];
2939 nritems
= btrfs_header_nritems(l
);
2940 mid
= (nritems
+ 1) / 2;
2944 leaf_space_used(l
, mid
, nritems
- mid
) + data_size
>
2945 BTRFS_LEAF_DATA_SIZE(root
)) {
2946 if (slot
>= nritems
) {
2950 if (mid
!= nritems
&&
2951 leaf_space_used(l
, mid
, nritems
- mid
) +
2952 data_size
> BTRFS_LEAF_DATA_SIZE(root
)) {
2953 if (data_size
&& !tried_avoid_double
)
2954 goto push_for_double
;
2960 if (leaf_space_used(l
, 0, mid
) + data_size
>
2961 BTRFS_LEAF_DATA_SIZE(root
)) {
2962 if (!extend
&& data_size
&& slot
== 0) {
2964 } else if ((extend
|| !data_size
) && slot
== 0) {
2968 if (mid
!= nritems
&&
2969 leaf_space_used(l
, mid
, nritems
- mid
) +
2970 data_size
> BTRFS_LEAF_DATA_SIZE(root
)) {
2971 if (data_size
&& !tried_avoid_double
)
2972 goto push_for_double
;
2980 btrfs_cpu_key_to_disk(&disk_key
, ins_key
);
2982 btrfs_item_key(l
, &disk_key
, mid
);
2984 right
= btrfs_alloc_free_block(trans
, root
, root
->leafsize
, 0,
2985 root
->root_key
.objectid
,
2986 &disk_key
, 0, l
->start
, 0, 0);
2988 return PTR_ERR(right
);
2990 root_add_used(root
, root
->leafsize
);
2992 memset_extent_buffer(right
, 0, 0, sizeof(struct btrfs_header
));
2993 btrfs_set_header_bytenr(right
, right
->start
);
2994 btrfs_set_header_generation(right
, trans
->transid
);
2995 btrfs_set_header_backref_rev(right
, BTRFS_MIXED_BACKREF_REV
);
2996 btrfs_set_header_owner(right
, root
->root_key
.objectid
);
2997 btrfs_set_header_level(right
, 0);
2998 write_extent_buffer(right
, root
->fs_info
->fsid
,
2999 (unsigned long)btrfs_header_fsid(right
),
3002 write_extent_buffer(right
, root
->fs_info
->chunk_tree_uuid
,
3003 (unsigned long)btrfs_header_chunk_tree_uuid(right
),
3008 btrfs_set_header_nritems(right
, 0);
3009 wret
= insert_ptr(trans
, root
, path
,
3010 &disk_key
, right
->start
,
3011 path
->slots
[1] + 1, 1);
3015 btrfs_tree_unlock(path
->nodes
[0]);
3016 free_extent_buffer(path
->nodes
[0]);
3017 path
->nodes
[0] = right
;
3019 path
->slots
[1] += 1;
3021 btrfs_set_header_nritems(right
, 0);
3022 wret
= insert_ptr(trans
, root
, path
,
3028 btrfs_tree_unlock(path
->nodes
[0]);
3029 free_extent_buffer(path
->nodes
[0]);
3030 path
->nodes
[0] = right
;
3032 if (path
->slots
[1] == 0) {
3033 wret
= fixup_low_keys(trans
, root
,
3034 path
, &disk_key
, 1);
3039 btrfs_mark_buffer_dirty(right
);
3043 ret
= copy_for_split(trans
, root
, path
, l
, right
, slot
, mid
, nritems
);
3047 BUG_ON(num_doubles
!= 0);
3055 push_for_double_split(trans
, root
, path
, data_size
);
3056 tried_avoid_double
= 1;
3057 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= data_size
)
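/*
 * Added note (illustration only, not part of the original file): split_leaf()
 * starts from mid = (nritems + 1) / 2 and only moves the split point when
 * the half that would receive the new item still could not hold data_size
 * bytes.  The space test applied to a candidate half looks like:
 *
 *	if (leaf_space_used(l, mid, nritems - mid) + data_size >
 *	    BTRFS_LEAF_DATA_SIZE(root))
 *		... adjust mid, create an empty leaf for the new item, or
 *		    try push_for_double_split() before splitting twice ...
 *
 * A normal split then hands items mid..nritems-1 to the new leaf through
 * copy_for_split(); the degenerate cases simply insert an empty leaf next
 * to the crowded one and point the path at it.
 */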
3062 static noinline
int setup_leaf_for_split(struct btrfs_trans_handle
*trans
,
3063 struct btrfs_root
*root
,
3064 struct btrfs_path
*path
, int ins_len
)
3066 struct btrfs_key key
;
3067 struct extent_buffer
*leaf
;
3068 struct btrfs_file_extent_item
*fi
;
3073 leaf
= path
->nodes
[0];
3074 btrfs_item_key_to_cpu(leaf
, &key
, path
->slots
[0]);
3076 BUG_ON(key
.type
!= BTRFS_EXTENT_DATA_KEY
&&
3077 key
.type
!= BTRFS_EXTENT_CSUM_KEY
);
3079 if (btrfs_leaf_free_space(root
, leaf
) >= ins_len
)
3082 item_size
= btrfs_item_size_nr(leaf
, path
->slots
[0]);
3083 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
3084 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
3085 struct btrfs_file_extent_item
);
3086 extent_len
= btrfs_file_extent_num_bytes(leaf
, fi
);
3088 btrfs_release_path(path
);
3090 path
->keep_locks
= 1;
3091 path
->search_for_split
= 1;
3092 ret
= btrfs_search_slot(trans
, root
, &key
, path
, 0, 1);
3093 path
->search_for_split
= 0;
3098 leaf
= path
->nodes
[0];
3099 /* if our item isn't there or got smaller, return now */
3100 if (ret
> 0 || item_size
!= btrfs_item_size_nr(leaf
, path
->slots
[0]))
3103 /* the leaf has changed, it now has room. return now */
3104 if (btrfs_leaf_free_space(root
, path
->nodes
[0]) >= ins_len
)
3107 if (key
.type
== BTRFS_EXTENT_DATA_KEY
) {
3108 fi
= btrfs_item_ptr(leaf
, path
->slots
[0],
3109 struct btrfs_file_extent_item
);
3110 if (extent_len
!= btrfs_file_extent_num_bytes(leaf
, fi
))
3114 btrfs_set_path_blocking(path
);
3115 ret
= split_leaf(trans
, root
, &key
, path
, ins_len
, 1);
3119 path
->keep_locks
= 0;
3120 btrfs_unlock_up_safe(path
, 1);
3123 path
->keep_locks
= 0;
3127 static noinline
int split_item(struct btrfs_trans_handle
*trans
,
3128 struct btrfs_root
*root
,
3129 struct btrfs_path
*path
,
3130 struct btrfs_key
*new_key
,
3131 unsigned long split_offset
)
3133 struct extent_buffer
*leaf
;
3134 struct btrfs_item
*item
;
3135 struct btrfs_item
*new_item
;
3141 struct btrfs_disk_key disk_key
;
3143 leaf
= path
->nodes
[0];
3144 BUG_ON(btrfs_leaf_free_space(root
, leaf
) < sizeof(struct btrfs_item
));
3146 btrfs_set_path_blocking(path
);
3148 item
= btrfs_item_nr(leaf
, path
->slots
[0]);
3149 orig_offset
= btrfs_item_offset(leaf
, item
);
3150 item_size
= btrfs_item_size(leaf
, item
);
3152 buf
= kmalloc(item_size
, GFP_NOFS
);
3156 read_extent_buffer(leaf
, buf
, btrfs_item_ptr_offset(leaf
,
3157 path
->slots
[0]), item_size
);
3159 slot
= path
->slots
[0] + 1;
3160 nritems
= btrfs_header_nritems(leaf
);
3161 if (slot
!= nritems
) {
3162 /* shift the items */
3163 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ 1),
3164 btrfs_item_nr_offset(slot
),
3165 (nritems
- slot
) * sizeof(struct btrfs_item
));
3168 btrfs_cpu_key_to_disk(&disk_key
, new_key
);
3169 btrfs_set_item_key(leaf
, &disk_key
, slot
);
3171 new_item
= btrfs_item_nr(leaf
, slot
);
3173 btrfs_set_item_offset(leaf
, new_item
, orig_offset
);
3174 btrfs_set_item_size(leaf
, new_item
, item_size
- split_offset
);
3176 btrfs_set_item_offset(leaf
, item
,
3177 orig_offset
+ item_size
- split_offset
);
3178 btrfs_set_item_size(leaf
, item
, split_offset
);
3180 btrfs_set_header_nritems(leaf
, nritems
+ 1);
3182 /* write the data for the start of the original item */
3183 write_extent_buffer(leaf
, buf
,
3184 btrfs_item_ptr_offset(leaf
, path
->slots
[0]),
3187 /* write the data for the new item */
3188 write_extent_buffer(leaf
, buf
+ split_offset
,
3189 btrfs_item_ptr_offset(leaf
, slot
),
3190 item_size
- split_offset
);
3191 btrfs_mark_buffer_dirty(leaf
);
3193 BUG_ON(btrfs_leaf_free_space(root
, leaf
) < 0);
/*
 * This function splits a single item into two items,
 * giving 'new_key' to the new item and splitting the
 * old one at split_offset (from the start of the item).
 *
 * The path may be released by this operation.  After
 * the split, the path is pointing to the old item.  The
 * new item is going to be in the same node as the old one.
 *
 * Note, the item being split must be small enough to live alone on
 * a tree block with room for one extra struct btrfs_item
 *
 * This allows us to split the item in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_split_item(struct btrfs_trans_handle *trans,
		     struct btrfs_root *root,
		     struct btrfs_path *path,
		     struct btrfs_key *new_key,
		     unsigned long split_offset)
{
	int ret;
	ret = setup_leaf_for_split(trans, root, path,
				   sizeof(struct btrfs_item));
	if (ret)
		return ret;

	ret = split_item(trans, root, path, new_key, split_offset);
	return ret;
}
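/*
 * Added worked example (not part of the original file): splitting an item
 * at split_offset keeps the first split_offset bytes in the old item and
 * hands the remainder to the new item in the following slot.  If the old
 * item started at data offset 3000 with size 100 and split_offset is 40:
 *
 *	old item: offset 3060, size 40   (payload bytes 0..39)
 *	new item: offset 3000, size 60   (payload bytes 40..99)
 *
 * The item's data region still spans 3000..3099; the two halves just swap
 * ends within it, which is why the leaf only needs room for one extra
 * struct btrfs_item and no extra data space.
 */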
/*
 * This function duplicates an item, giving 'new_key' to the new item.
 * It guarantees both items live in the same tree leaf and the new item
 * is contiguous with the original item.
 *
 * This allows us to split a file extent in place, keeping a lock on the
 * leaf the entire time.
 */
int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 struct btrfs_path *path,
			 struct btrfs_key *new_key)
{
	struct extent_buffer *leaf;
	int ret;
	u32 item_size;

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ret = setup_leaf_for_split(trans, root, path,
				   item_size + sizeof(struct btrfs_item));
	if (ret)
		return ret;

	path->slots[0]++;
	ret = setup_items_for_insert(trans, root, path, new_key, &item_size,
				     item_size, item_size +
				     sizeof(struct btrfs_item), 1);
	BUG_ON(ret);

	leaf = path->nodes[0];
	memcpy_extent_buffer(leaf,
			     btrfs_item_ptr_offset(leaf, path->slots[0]),
			     btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
			     item_size);
	return 0;
}
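/*
 * Added note (illustration only, not part of the original file): a typical
 * use of btrfs_duplicate_item() is carving a file extent item in two.  A
 * hypothetical caller sketch, where 'new_key' matches the original key
 * except for its offset:
 *
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *	if (ret)
 *		return ret;
 *	// path->slots[0] now points at the copy; both items still carry
 *	// identical payloads until the caller trims each to its own range.
 */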
/*
 * make the item pointed to by the path smaller.  new_size indicates
 * how small to make it, and from_end tells us if we just chop bytes
 * off the end of the item or if we shift the item to chop bytes off
 * the front.
 */
3273 int btrfs_truncate_item(struct btrfs_trans_handle
*trans
,
3274 struct btrfs_root
*root
,
3275 struct btrfs_path
*path
,
3276 u32 new_size
, int from_end
)
3279 struct extent_buffer
*leaf
;
3280 struct btrfs_item
*item
;
3282 unsigned int data_end
;
3283 unsigned int old_data_start
;
3284 unsigned int old_size
;
3285 unsigned int size_diff
;
3288 leaf
= path
->nodes
[0];
3289 slot
= path
->slots
[0];
3291 old_size
= btrfs_item_size_nr(leaf
, slot
);
3292 if (old_size
== new_size
)
3295 nritems
= btrfs_header_nritems(leaf
);
3296 data_end
= leaf_data_end(root
, leaf
);
3298 old_data_start
= btrfs_item_offset_nr(leaf
, slot
);
3300 size_diff
= old_size
- new_size
;
3303 BUG_ON(slot
>= nritems
);
3306 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3308 /* first correct the data pointers */
3309 for (i
= slot
; i
< nritems
; i
++) {
3311 item
= btrfs_item_nr(leaf
, i
);
3313 ioff
= btrfs_item_offset(leaf
, item
);
3314 btrfs_set_item_offset(leaf
, item
, ioff
+ size_diff
);
3317 /* shift the data */
3319 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3320 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
3321 data_end
, old_data_start
+ new_size
- data_end
);
3323 struct btrfs_disk_key disk_key
;
3326 btrfs_item_key(leaf
, &disk_key
, slot
);
3328 if (btrfs_disk_key_type(&disk_key
) == BTRFS_EXTENT_DATA_KEY
) {
3330 struct btrfs_file_extent_item
*fi
;
3332 fi
= btrfs_item_ptr(leaf
, slot
,
3333 struct btrfs_file_extent_item
);
3334 fi
= (struct btrfs_file_extent_item
*)(
3335 (unsigned long)fi
- size_diff
);
3337 if (btrfs_file_extent_type(leaf
, fi
) ==
3338 BTRFS_FILE_EXTENT_INLINE
) {
3339 ptr
= btrfs_item_ptr_offset(leaf
, slot
);
3340 memmove_extent_buffer(leaf
, ptr
,
3342 offsetof(struct btrfs_file_extent_item
,
3347 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3348 data_end
+ size_diff
, btrfs_leaf_data(leaf
) +
3349 data_end
, old_data_start
- data_end
);
3351 offset
= btrfs_disk_key_offset(&disk_key
);
3352 btrfs_set_disk_key_offset(&disk_key
, offset
+ size_diff
);
3353 btrfs_set_item_key(leaf
, &disk_key
, slot
);
3355 fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
3358 item
= btrfs_item_nr(leaf
, slot
);
3359 btrfs_set_item_size(leaf
, item
, new_size
);
3360 btrfs_mark_buffer_dirty(leaf
);
3362 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
3363 btrfs_print_leaf(root
, leaf
);
/*
 * make the item pointed to by the path bigger, data_size is the new size.
 */
3372 int btrfs_extend_item(struct btrfs_trans_handle
*trans
,
3373 struct btrfs_root
*root
, struct btrfs_path
*path
,
3377 struct extent_buffer
*leaf
;
3378 struct btrfs_item
*item
;
3380 unsigned int data_end
;
3381 unsigned int old_data
;
3382 unsigned int old_size
;
3385 leaf
= path
->nodes
[0];
3387 nritems
= btrfs_header_nritems(leaf
);
3388 data_end
= leaf_data_end(root
, leaf
);
3390 if (btrfs_leaf_free_space(root
, leaf
) < data_size
) {
3391 btrfs_print_leaf(root
, leaf
);
3394 slot
= path
->slots
[0];
3395 old_data
= btrfs_item_end_nr(leaf
, slot
);
3398 if (slot
>= nritems
) {
3399 btrfs_print_leaf(root
, leaf
);
3400 printk(KERN_CRIT
"slot %d too large, nritems %d\n",
3406 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3408 /* first correct the data pointers */
3409 for (i
= slot
; i
< nritems
; i
++) {
3411 item
= btrfs_item_nr(leaf
, i
);
3413 ioff
= btrfs_item_offset(leaf
, item
);
3414 btrfs_set_item_offset(leaf
, item
, ioff
- data_size
);
3417 /* shift the data */
3418 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3419 data_end
- data_size
, btrfs_leaf_data(leaf
) +
3420 data_end
, old_data
- data_end
);
3422 data_end
= old_data
;
3423 old_size
= btrfs_item_size_nr(leaf
, slot
);
3424 item
= btrfs_item_nr(leaf
, slot
);
3425 btrfs_set_item_size(leaf
, item
, old_size
+ data_size
);
3426 btrfs_mark_buffer_dirty(leaf
);
3428 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
3429 btrfs_print_leaf(root
, leaf
);
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 * Returns the number of keys that were inserted.
 */
3440 int btrfs_insert_some_items(struct btrfs_trans_handle
*trans
,
3441 struct btrfs_root
*root
,
3442 struct btrfs_path
*path
,
3443 struct btrfs_key
*cpu_key
, u32
*data_size
,
3446 struct extent_buffer
*leaf
;
3447 struct btrfs_item
*item
;
3454 unsigned int data_end
;
3455 struct btrfs_disk_key disk_key
;
3456 struct btrfs_key found_key
;
3458 for (i
= 0; i
< nr
; i
++) {
3459 if (total_size
+ data_size
[i
] + sizeof(struct btrfs_item
) >
3460 BTRFS_LEAF_DATA_SIZE(root
)) {
3464 total_data
+= data_size
[i
];
3465 total_size
+= data_size
[i
] + sizeof(struct btrfs_item
);
3469 ret
= btrfs_search_slot(trans
, root
, cpu_key
, path
, total_size
, 1);
3475 leaf
= path
->nodes
[0];
3477 nritems
= btrfs_header_nritems(leaf
);
3478 data_end
= leaf_data_end(root
, leaf
);
3480 if (btrfs_leaf_free_space(root
, leaf
) < total_size
) {
3481 for (i
= nr
; i
>= 0; i
--) {
3482 total_data
-= data_size
[i
];
3483 total_size
-= data_size
[i
] + sizeof(struct btrfs_item
);
3484 if (total_size
< btrfs_leaf_free_space(root
, leaf
))
3490 slot
= path
->slots
[0];
3493 if (slot
!= nritems
) {
3494 unsigned int old_data
= btrfs_item_end_nr(leaf
, slot
);
3496 item
= btrfs_item_nr(leaf
, slot
);
3497 btrfs_item_key_to_cpu(leaf
, &found_key
, slot
);
3499 /* figure out how many keys we can insert in here */
3500 total_data
= data_size
[0];
3501 for (i
= 1; i
< nr
; i
++) {
3502 if (btrfs_comp_cpu_keys(&found_key
, cpu_key
+ i
) <= 0)
3504 total_data
+= data_size
[i
];
3508 if (old_data
< data_end
) {
3509 btrfs_print_leaf(root
, leaf
);
3510 printk(KERN_CRIT
"slot %d old_data %d data_end %d\n",
3511 slot
, old_data
, data_end
);
3515 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3517 /* first correct the data pointers */
3518 for (i
= slot
; i
< nritems
; i
++) {
3521 item
= btrfs_item_nr(leaf
, i
);
3522 ioff
= btrfs_item_offset(leaf
, item
);
3523 btrfs_set_item_offset(leaf
, item
, ioff
- total_data
);
3525 /* shift the items */
3526 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ nr
),
3527 btrfs_item_nr_offset(slot
),
3528 (nritems
- slot
) * sizeof(struct btrfs_item
));
3530 /* shift the data */
3531 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3532 data_end
- total_data
, btrfs_leaf_data(leaf
) +
3533 data_end
, old_data
- data_end
);
3534 data_end
= old_data
;
3537 * this sucks but it has to be done, if we are inserting at
3538 * the end of the leaf only insert 1 of the items, since we
3539 * have no way of knowing whats on the next leaf and we'd have
3540 * to drop our current locks to figure it out
3545 /* setup the item for the new data */
3546 for (i
= 0; i
< nr
; i
++) {
3547 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
+ i
);
3548 btrfs_set_item_key(leaf
, &disk_key
, slot
+ i
);
3549 item
= btrfs_item_nr(leaf
, slot
+ i
);
3550 btrfs_set_item_offset(leaf
, item
, data_end
- data_size
[i
]);
3551 data_end
-= data_size
[i
];
3552 btrfs_set_item_size(leaf
, item
, data_size
[i
]);
3554 btrfs_set_header_nritems(leaf
, nritems
+ nr
);
3555 btrfs_mark_buffer_dirty(leaf
);
3559 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
);
3560 ret
= fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
3563 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
3564 btrfs_print_leaf(root
, leaf
);
/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
3578 int setup_items_for_insert(struct btrfs_trans_handle
*trans
,
3579 struct btrfs_root
*root
, struct btrfs_path
*path
,
3580 struct btrfs_key
*cpu_key
, u32
*data_size
,
3581 u32 total_data
, u32 total_size
, int nr
)
3583 struct btrfs_item
*item
;
3586 unsigned int data_end
;
3587 struct btrfs_disk_key disk_key
;
3589 struct extent_buffer
*leaf
;
3592 leaf
= path
->nodes
[0];
3593 slot
= path
->slots
[0];
3595 nritems
= btrfs_header_nritems(leaf
);
3596 data_end
= leaf_data_end(root
, leaf
);
3598 if (btrfs_leaf_free_space(root
, leaf
) < total_size
) {
3599 btrfs_print_leaf(root
, leaf
);
3600 printk(KERN_CRIT
"not enough freespace need %u have %d\n",
3601 total_size
, btrfs_leaf_free_space(root
, leaf
));
3605 if (slot
!= nritems
) {
3606 unsigned int old_data
= btrfs_item_end_nr(leaf
, slot
);
3608 if (old_data
< data_end
) {
3609 btrfs_print_leaf(root
, leaf
);
3610 printk(KERN_CRIT
"slot %d old_data %d data_end %d\n",
3611 slot
, old_data
, data_end
);
3615 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3617 /* first correct the data pointers */
3618 for (i
= slot
; i
< nritems
; i
++) {
3621 item
= btrfs_item_nr(leaf
, i
);
3622 ioff
= btrfs_item_offset(leaf
, item
);
3623 btrfs_set_item_offset(leaf
, item
, ioff
- total_data
);
3625 /* shift the items */
3626 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
+ nr
),
3627 btrfs_item_nr_offset(slot
),
3628 (nritems
- slot
) * sizeof(struct btrfs_item
));
3630 /* shift the data */
3631 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3632 data_end
- total_data
, btrfs_leaf_data(leaf
) +
3633 data_end
, old_data
- data_end
);
3634 data_end
= old_data
;
3637 /* setup the item for the new data */
3638 for (i
= 0; i
< nr
; i
++) {
3639 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
+ i
);
3640 btrfs_set_item_key(leaf
, &disk_key
, slot
+ i
);
3641 item
= btrfs_item_nr(leaf
, slot
+ i
);
3642 btrfs_set_item_offset(leaf
, item
, data_end
- data_size
[i
]);
3643 data_end
-= data_size
[i
];
3644 btrfs_set_item_size(leaf
, item
, data_size
[i
]);
3647 btrfs_set_header_nritems(leaf
, nritems
+ nr
);
3651 btrfs_cpu_key_to_disk(&disk_key
, cpu_key
);
3652 ret
= fixup_low_keys(trans
, root
, path
, &disk_key
, 1);
3654 btrfs_unlock_up_safe(path
, 1);
3655 btrfs_mark_buffer_dirty(leaf
);
3657 if (btrfs_leaf_free_space(root
, leaf
) < 0) {
3658 btrfs_print_leaf(root
, leaf
);
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
3668 int btrfs_insert_empty_items(struct btrfs_trans_handle
*trans
,
3669 struct btrfs_root
*root
,
3670 struct btrfs_path
*path
,
3671 struct btrfs_key
*cpu_key
, u32
*data_size
,
3680 for (i
= 0; i
< nr
; i
++)
3681 total_data
+= data_size
[i
];
3683 total_size
= total_data
+ (nr
* sizeof(struct btrfs_item
));
3684 ret
= btrfs_search_slot(trans
, root
, cpu_key
, path
, total_size
, 1);
3690 slot
= path
->slots
[0];
3693 ret
= setup_items_for_insert(trans
, root
, path
, cpu_key
, data_size
,
3694 total_data
, total_size
, nr
);
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
3704 int btrfs_insert_item(struct btrfs_trans_handle
*trans
, struct btrfs_root
3705 *root
, struct btrfs_key
*cpu_key
, void *data
, u32
3709 struct btrfs_path
*path
;
3710 struct extent_buffer
*leaf
;
3713 path
= btrfs_alloc_path();
3716 ret
= btrfs_insert_empty_item(trans
, root
, path
, cpu_key
, data_size
);
3718 leaf
= path
->nodes
[0];
3719 ptr
= btrfs_item_ptr_offset(leaf
, path
->slots
[0]);
3720 write_extent_buffer(leaf
, data
, ptr
, data_size
);
3721 btrfs_mark_buffer_dirty(leaf
);
3723 btrfs_free_path(path
);
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
3733 static int del_ptr(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
3734 struct btrfs_path
*path
, int level
, int slot
)
3736 struct extent_buffer
*parent
= path
->nodes
[level
];
3741 nritems
= btrfs_header_nritems(parent
);
3742 if (slot
!= nritems
- 1) {
3743 memmove_extent_buffer(parent
,
3744 btrfs_node_key_ptr_offset(slot
),
3745 btrfs_node_key_ptr_offset(slot
+ 1),
3746 sizeof(struct btrfs_key_ptr
) *
3747 (nritems
- slot
- 1));
3750 btrfs_set_header_nritems(parent
, nritems
);
3751 if (nritems
== 0 && parent
== root
->node
) {
3752 BUG_ON(btrfs_header_level(root
->node
) != 1);
3753 /* just turn the root into a leaf and break */
3754 btrfs_set_header_level(root
->node
, 0);
3755 } else if (slot
== 0) {
3756 struct btrfs_disk_key disk_key
;
3758 btrfs_node_key(parent
, &disk_key
, 0);
3759 wret
= fixup_low_keys(trans
, root
, path
, &disk_key
, level
+ 1);
3763 btrfs_mark_buffer_dirty(parent
);
/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.  zero is returned if it all worked out, < 0 otherwise.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
3777 static noinline
int btrfs_del_leaf(struct btrfs_trans_handle
*trans
,
3778 struct btrfs_root
*root
,
3779 struct btrfs_path
*path
,
3780 struct extent_buffer
*leaf
)
3784 WARN_ON(btrfs_header_generation(leaf
) != trans
->transid
);
3785 ret
= del_ptr(trans
, root
, path
, 1, path
->slots
[1]);
3790 * btrfs_free_extent is expensive, we want to make sure we
3791 * aren't holding any locks when we call it
3793 btrfs_unlock_up_safe(path
, 0);
3795 root_sub_used(root
, leaf
->len
);
3797 extent_buffer_get(leaf
);
3798 btrfs_free_tree_block(trans
, root
, leaf
, 0, 1, 0);
3799 free_extent_buffer_stale(leaf
);
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
3806 int btrfs_del_items(struct btrfs_trans_handle
*trans
, struct btrfs_root
*root
,
3807 struct btrfs_path
*path
, int slot
, int nr
)
3809 struct extent_buffer
*leaf
;
3810 struct btrfs_item
*item
;
3818 leaf
= path
->nodes
[0];
3819 last_off
= btrfs_item_offset_nr(leaf
, slot
+ nr
- 1);
3821 for (i
= 0; i
< nr
; i
++)
3822 dsize
+= btrfs_item_size_nr(leaf
, slot
+ i
);
3824 nritems
= btrfs_header_nritems(leaf
);
3826 if (slot
+ nr
!= nritems
) {
3827 int data_end
= leaf_data_end(root
, leaf
);
3829 memmove_extent_buffer(leaf
, btrfs_leaf_data(leaf
) +
3831 btrfs_leaf_data(leaf
) + data_end
,
3832 last_off
- data_end
);
3834 for (i
= slot
+ nr
; i
< nritems
; i
++) {
3837 item
= btrfs_item_nr(leaf
, i
);
3838 ioff
= btrfs_item_offset(leaf
, item
);
3839 btrfs_set_item_offset(leaf
, item
, ioff
+ dsize
);
3842 memmove_extent_buffer(leaf
, btrfs_item_nr_offset(slot
),
3843 btrfs_item_nr_offset(slot
+ nr
),
3844 sizeof(struct btrfs_item
) *
3845 (nritems
- slot
- nr
));
3847 btrfs_set_header_nritems(leaf
, nritems
- nr
);
3850 /* delete the leaf if we've emptied it */
3852 if (leaf
== root
->node
) {
3853 btrfs_set_header_level(leaf
, 0);
3855 btrfs_set_path_blocking(path
);
3856 clean_tree_block(trans
, root
, leaf
);
3857 ret
= btrfs_del_leaf(trans
, root
, path
, leaf
);
3861 int used
= leaf_space_used(leaf
, 0, nritems
);
3863 struct btrfs_disk_key disk_key
;
3865 btrfs_item_key(leaf
, &disk_key
, 0);
3866 wret
= fixup_low_keys(trans
, root
, path
,
3872 /* delete the leaf if it is mostly empty */
3873 if (used
< BTRFS_LEAF_DATA_SIZE(root
) / 3) {
3874 /* push_leaf_left fixes the path.
3875 * make sure the path still points to our leaf
3876 * for possible call to del_ptr below
3878 slot
= path
->slots
[1];
3879 extent_buffer_get(leaf
);
3881 btrfs_set_path_blocking(path
);
3882 wret
= push_leaf_left(trans
, root
, path
, 1, 1,
3884 if (wret
< 0 && wret
!= -ENOSPC
)
3887 if (path
->nodes
[0] == leaf
&&
3888 btrfs_header_nritems(leaf
)) {
3889 wret
= push_leaf_right(trans
, root
, path
, 1,
3891 if (wret
< 0 && wret
!= -ENOSPC
)
3895 if (btrfs_header_nritems(leaf
) == 0) {
3896 path
->slots
[1] = slot
;
3897 ret
= btrfs_del_leaf(trans
, root
, path
, leaf
);
3899 free_extent_buffer(leaf
);
3901 /* if we're still in the path, make sure
3902 * we're dirty. Otherwise, one of the
3903 * push_leaf functions must have already
3904 * dirtied this buffer
3906 if (path
->nodes
[0] == leaf
)
3907 btrfs_mark_buffer_dirty(leaf
);
3908 free_extent_buffer(leaf
);
3911 btrfs_mark_buffer_dirty(leaf
);
/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
3925 int btrfs_prev_leaf(struct btrfs_root
*root
, struct btrfs_path
*path
)
3927 struct btrfs_key key
;
3928 struct btrfs_disk_key found_key
;
3931 btrfs_item_key_to_cpu(path
->nodes
[0], &key
, 0);
3935 else if (key
.type
> 0)
3937 else if (key
.objectid
> 0)
3942 btrfs_release_path(path
);
3943 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
3946 btrfs_item_key(path
->nodes
[0], &found_key
, 0);
3947 ret
= comp_keys(&found_key
, &key
);
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
3975 int btrfs_search_forward(struct btrfs_root
*root
, struct btrfs_key
*min_key
,
3976 struct btrfs_key
*max_key
,
3977 struct btrfs_path
*path
, int cache_only
,
3980 struct extent_buffer
*cur
;
3981 struct btrfs_key found_key
;
3988 WARN_ON(!path
->keep_locks
);
3990 cur
= btrfs_read_lock_root_node(root
);
3991 level
= btrfs_header_level(cur
);
3992 WARN_ON(path
->nodes
[level
]);
3993 path
->nodes
[level
] = cur
;
3994 path
->locks
[level
] = BTRFS_READ_LOCK
;
3996 if (btrfs_header_generation(cur
) < min_trans
) {
4001 nritems
= btrfs_header_nritems(cur
);
4002 level
= btrfs_header_level(cur
);
4003 sret
= bin_search(cur
, min_key
, level
, &slot
);
4005 /* at the lowest level, we're done, setup the path and exit */
4006 if (level
== path
->lowest_level
) {
4007 if (slot
>= nritems
)
4010 path
->slots
[level
] = slot
;
4011 btrfs_item_key_to_cpu(cur
, &found_key
, slot
);
4014 if (sret
&& slot
> 0)
4017 * check this node pointer against the cache_only and
4018 * min_trans parameters. If it isn't in cache or is too
4019 * old, skip to the next one.
4021 while (slot
< nritems
) {
4024 struct extent_buffer
*tmp
;
4025 struct btrfs_disk_key disk_key
;
4027 blockptr
= btrfs_node_blockptr(cur
, slot
);
4028 gen
= btrfs_node_ptr_generation(cur
, slot
);
4029 if (gen
< min_trans
) {
4037 btrfs_node_key(cur
, &disk_key
, slot
);
4038 if (comp_keys(&disk_key
, max_key
) >= 0) {
4044 tmp
= btrfs_find_tree_block(root
, blockptr
,
4045 btrfs_level_size(root
, level
- 1));
4047 if (tmp
&& btrfs_buffer_uptodate(tmp
, gen
)) {
4048 free_extent_buffer(tmp
);
4052 free_extent_buffer(tmp
);
4057 * we didn't find a candidate key in this node, walk forward
4058 * and find another one
4060 if (slot
>= nritems
) {
4061 path
->slots
[level
] = slot
;
4062 btrfs_set_path_blocking(path
);
4063 sret
= btrfs_find_next_key(root
, path
, min_key
, level
,
4064 cache_only
, min_trans
);
4066 btrfs_release_path(path
);
4072 /* save our key for returning back */
4073 btrfs_node_key_to_cpu(cur
, &found_key
, slot
);
4074 path
->slots
[level
] = slot
;
4075 if (level
== path
->lowest_level
) {
4077 unlock_up(path
, level
, 1);
4080 btrfs_set_path_blocking(path
);
4081 cur
= read_node_slot(root
, cur
, slot
);
4084 btrfs_tree_read_lock(cur
);
4086 path
->locks
[level
- 1] = BTRFS_READ_LOCK
;
4087 path
->nodes
[level
- 1] = cur
;
4088 unlock_up(path
, level
, 1);
4089 btrfs_clear_path_blocking(path
, NULL
, 0);
4093 memcpy(min_key
, &found_key
, sizeof(found_key
));
4094 btrfs_set_path_blocking(path
);
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
4110 int btrfs_find_next_key(struct btrfs_root
*root
, struct btrfs_path
*path
,
4111 struct btrfs_key
*key
, int level
,
4112 int cache_only
, u64 min_trans
)
4115 struct extent_buffer
*c
;
4117 WARN_ON(!path
->keep_locks
);
4118 while (level
< BTRFS_MAX_LEVEL
) {
4119 if (!path
->nodes
[level
])
4122 slot
= path
->slots
[level
] + 1;
4123 c
= path
->nodes
[level
];
4125 if (slot
>= btrfs_header_nritems(c
)) {
4128 struct btrfs_key cur_key
;
4129 if (level
+ 1 >= BTRFS_MAX_LEVEL
||
4130 !path
->nodes
[level
+ 1])
4133 if (path
->locks
[level
+ 1]) {
4138 slot
= btrfs_header_nritems(c
) - 1;
4140 btrfs_item_key_to_cpu(c
, &cur_key
, slot
);
4142 btrfs_node_key_to_cpu(c
, &cur_key
, slot
);
4144 orig_lowest
= path
->lowest_level
;
4145 btrfs_release_path(path
);
4146 path
->lowest_level
= level
;
4147 ret
= btrfs_search_slot(NULL
, root
, &cur_key
, path
,
4149 path
->lowest_level
= orig_lowest
;
4153 c
= path
->nodes
[level
];
4154 slot
= path
->slots
[level
];
4161 btrfs_item_key_to_cpu(c
, key
, slot
);
4163 u64 blockptr
= btrfs_node_blockptr(c
, slot
);
4164 u64 gen
= btrfs_node_ptr_generation(c
, slot
);
4167 struct extent_buffer
*cur
;
4168 cur
= btrfs_find_tree_block(root
, blockptr
,
4169 btrfs_level_size(root
, level
- 1));
4170 if (!cur
|| !btrfs_buffer_uptodate(cur
, gen
)) {
4173 free_extent_buffer(cur
);
4176 free_extent_buffer(cur
);
4178 if (gen
< min_trans
) {
4182 btrfs_node_key_to_cpu(c
, key
, slot
);
/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
4194 int btrfs_next_leaf(struct btrfs_root
*root
, struct btrfs_path
*path
)
4198 struct extent_buffer
*c
;
4199 struct extent_buffer
*next
;
4200 struct btrfs_key key
;
4203 int old_spinning
= path
->leave_spinning
;
4204 int next_rw_lock
= 0;
4206 nritems
= btrfs_header_nritems(path
->nodes
[0]);
4210 btrfs_item_key_to_cpu(path
->nodes
[0], &key
, nritems
- 1);
4215 btrfs_release_path(path
);
4217 path
->keep_locks
= 1;
4218 path
->leave_spinning
= 1;
4220 ret
= btrfs_search_slot(NULL
, root
, &key
, path
, 0, 0);
4221 path
->keep_locks
= 0;
4226 nritems
= btrfs_header_nritems(path
->nodes
[0]);
4228 * by releasing the path above we dropped all our locks. A balance
4229 * could have added more items next to the key that used to be
4230 * at the very end of the block. So, check again here and
4231 * advance the path if there are now more items available.
4233 if (nritems
> 0 && path
->slots
[0] < nritems
- 1) {
4240 while (level
< BTRFS_MAX_LEVEL
) {
4241 if (!path
->nodes
[level
]) {
4246 slot
= path
->slots
[level
] + 1;
4247 c
= path
->nodes
[level
];
4248 if (slot
>= btrfs_header_nritems(c
)) {
4250 if (level
== BTRFS_MAX_LEVEL
) {
4258 btrfs_tree_unlock_rw(next
, next_rw_lock
);
4259 free_extent_buffer(next
);
4263 next_rw_lock
= path
->locks
[level
];
4264 ret
= read_block_for_search(NULL
, root
, path
, &next
, level
,
4270 btrfs_release_path(path
);
4274 if (!path
->skip_locking
) {
4275 ret
= btrfs_try_tree_read_lock(next
);
4277 btrfs_set_path_blocking(path
);
4278 btrfs_tree_read_lock(next
);
4279 btrfs_clear_path_blocking(path
, next
,
4282 next_rw_lock
= BTRFS_READ_LOCK
;
4286 path
->slots
[level
] = slot
;
4289 c
= path
->nodes
[level
];
4290 if (path
->locks
[level
])
4291 btrfs_tree_unlock_rw(c
, path
->locks
[level
]);
4293 free_extent_buffer(c
);
4294 path
->nodes
[level
] = next
;
4295 path
->slots
[level
] = 0;
4296 if (!path
->skip_locking
)
4297 path
->locks
[level
] = next_rw_lock
;
4301 ret
= read_block_for_search(NULL
, root
, path
, &next
, level
,
4307 btrfs_release_path(path
);
4311 if (!path
->skip_locking
) {
4312 ret
= btrfs_try_tree_read_lock(next
);
4314 btrfs_set_path_blocking(path
);
4315 btrfs_tree_read_lock(next
);
4316 btrfs_clear_path_blocking(path
, next
,
4319 next_rw_lock
= BTRFS_READ_LOCK
;
4324 unlock_up(path
, 0, 1);
4325 path
->leave_spinning
= old_spinning
;
4327 btrfs_set_path_blocking(path
);
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
4338 int btrfs_previous_item(struct btrfs_root
*root
,
4339 struct btrfs_path
*path
, u64 min_objectid
,
4342 struct btrfs_key found_key
;
4343 struct extent_buffer
*leaf
;
4348 if (path
->slots
[0] == 0) {
4349 btrfs_set_path_blocking(path
);
4350 ret
= btrfs_prev_leaf(root
, path
);
4356 leaf
= path
->nodes
[0];
4357 nritems
= btrfs_header_nritems(leaf
);
4360 if (path
->slots
[0] == nritems
)
4363 btrfs_item_key_to_cpu(leaf
, &found_key
, path
->slots
[0]);
4364 if (found_key
.objectid
< min_objectid
)
4366 if (found_key
.type
== type
)
4368 if (found_key
.objectid
== min_objectid
&&
4369 found_key
.type
< type
)