/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"

struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;

/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */

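/*
 * A rough sketch of the flow, assuming the usual callers in extent-tree.c:
 * modifications queue refs through btrfs_add_delayed_tree_ref() and
 * btrfs_add_delayed_data_ref() below, the refs accumulate under
 * delayed_refs->root, and the delayed-ref runner later drains them in
 * clusters built by btrfs_find_ref_cluster(), applying the net result to
 * the extent allocation tree.
 */
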
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1)
{
	if (ref1->root < ref2->root)
		return -1;
	if (ref1->root > ref2->root)
		return 1;
	if (ref1->parent < ref2->parent)
		return -1;
	if (ref1->parent > ref2->parent)
		return 1;
	return 0;
}

/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}

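/*
 * For illustration (made-up values): two EXTENT_DATA_REF entries with
 * root 5 and objectid 257 but offsets 0 and 4096 differ only in offset,
 * so the offset-0 ref sorts first; SHARED_DATA_REF entries skip the
 * root/objectid/offset checks entirely and order by parent block alone.
 */
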
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1));
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}

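/*
 * Net ordering, reading the checks above top to bottom: nodes group by
 * bytenr first, a head compares equal only to the other head of the same
 * extent, and plain refs then order by type, by seq (when compare_seq is
 * set), and finally by the tree or data payload, so cmp == 0 at insert
 * time means a true duplicate.
 */
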
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}

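/*
 * A non-NULL return is the collision path: callers below fold the new
 * node into the existing one via update_existing_ref() or
 * update_existing_head_ref() and free the duplicate, so the tree never
 * holds two entries that compare equal.
 */
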
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
				  u64 bytenr,
				  struct btrfs_delayed_ref_node **last,
				  int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_node *entry;
	int cmp = 0;

again:
	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);
		if (last)
			*last = entry;

		if (bytenr < entry->bytenr)
			cmp = -1;
		else if (bytenr > entry->bytenr)
			cmp = 1;
		else if (!btrfs_delayed_ref_is_head(entry))
			cmp = 1;
		else
			cmp = 0;

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (cmp > 0) {
			n = rb_next(&entry->rb_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_node,
					 rb_node);
			bytenr = entry->bytenr;
			return_bigger = 0;
			goto again;
		}
		return entry;
	}
	return NULL;
}

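/*
 * Note on return_bigger, as used by btrfs_find_ref_cluster(): when no
 * head sits exactly at bytenr, the search restarts from the next bigger
 * entry, wrapping to rb_first() at the end of the tree, so a scan that
 * resumes just past its start point never misses heads at the low end.
 */
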
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}

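/*
 * A minimal caller sketch (hypothetical, not from this file):
 *
 *	ret = btrfs_delayed_ref_lock(trans, locked_ref);
 *	if (ret == -EAGAIN)
 *		continue;
 *
 * -EAGAIN means the head was removed from the tree while we slept waiting
 * for the mutex, so the caller should look up a head again rather than
 * treat this as an error; delayed_refs->lock is held again on both
 * return paths.
 */
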
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_node *ref)
{
	rb_erase(&ref->rb_node, &delayed_refs->root);
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	delayed_refs->num_entries--;
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}

static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int merged = 0;
	int mod = 0;
	int done = 0;

	node = rb_prev(&ref->rb_node);
	while (node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_prev(node);
		if (next->bytenr != ref->bytenr)
			break;
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		merged++;
		drop_delayed_ref(trans, delayed_refs, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, ref);
			break;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}

		if (done)
			break;
		node = rb_prev(&ref->rb_node);
	}

	return merged;
}

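/*
 * Worked example (made-up numbers): an ADD with ref_mod 1 merged with a
 * DROP of ref_mod 1 on the same payload yields mod == -1, ref->ref_mod
 * drops to 0 and both nodes are freed; two ADDs merge into one node with
 * ref_mod 2, which is fine for data refs but trips the WARN_ON above for
 * tree blocks, where a given payload may appear at most once.
 */
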
void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans,
			      struct btrfs_fs_info *fs_info,
			      struct btrfs_delayed_ref_root *delayed_refs,
			      struct btrfs_delayed_ref_head *head)
{
	struct rb_node *node;
	u64 seq = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		struct seq_list *elem;

		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		seq = elem->seq;
	}
	spin_unlock(&fs_info->tree_mod_seq_lock);

	node = rb_prev(&head->node.rb_node);
	while (node) {
		struct btrfs_delayed_ref_node *ref;

		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr != head->node.bytenr)
			break;

		/* We can't merge refs that are outside of our seq count */
		if (seq && ref->seq >= seq)
			break;
		if (merge_ref(trans, delayed_refs, ref, seq))
			node = rb_prev(&head->node.rb_node);
		else
			node = rb_prev(node);
	}
}

int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %llu, lowest is "
				 "%llu (%p)\n", seq, elem->seq, delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}

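/*
 * Put differently: as long as some backref walker holds the lowest
 * element on tree_mod_seq_list, any delayed ref stamped with a seq at or
 * above that element is held back, keeping the walker's view of the
 * backrefs stable until it drops its element.
 */
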
int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
			   struct list_head *cluster, u64 start)
{
	int count = 0;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *node;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;

	delayed_refs = &trans->transaction->delayed_refs;
	if (start == 0) {
		node = rb_first(&delayed_refs->root);
	} else {
		ref = NULL;
		find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
		if (ref) {
			node = &ref->rb_node;
		} else
			node = rb_first(&delayed_refs->root);
	}
again:
	while (node && count < 32) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (list_empty(&head->cluster)) {
				list_add_tail(&head->cluster, cluster);
				delayed_refs->run_delayed_start =
					head->node.bytenr;
				count++;

				WARN_ON(delayed_refs->num_heads_ready == 0);
				delayed_refs->num_heads_ready--;
			} else if (count) {
				/* the goal of the clustering is to find extents
				 * that are likely to end up in the same extent
				 * leaf on disk.  So, we don't want them spread
				 * all over the tree.  Stop now if we've hit
				 * a head that was already in use
				 */
				break;
			}
		}
		node = rb_next(node);
	}
	if (count) {
		return 0;
	} else if (start) {
		/*
		 * we've gone to the end of the rbtree without finding any
		 * clusters.  start from the beginning and try again
		 */
		start = 0;
		node = rb_first(&delayed_refs->root);
		goto again;
	}
	return 1;
}

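/*
 * Return convention: 0 means up to 32 unused heads were queued on
 * @cluster and run_delayed_start advanced; 1 means no unused head exists
 * anywhere in the tree.  Callers process the cluster and then empty it
 * with btrfs_release_ref_cluster() below.
 */
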
void btrfs_release_ref_cluster(struct list_head *cluster)
{
	struct list_head *pos, *q;

	list_for_each_safe(pos, q, cluster)
		list_del_init(pos);
}

/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}

/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}

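/*
 * Example of the head accounting (made-up sequence): queueing an ADD head
 * (count_mod +1), an UPDATE head (0) and a DROP head (-1) for one extent
 * leaves existing->ref_mod at 0, i.e. the queued adds and drops cancel
 * and only the side effects (extent_op, must_insert_reserved) remain.
 */
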
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline void add_delayed_ref_head(struct btrfs_fs_info *fs_info,
					struct btrfs_trans_handle *trans,
					struct btrfs_delayed_ref_node *ref,
					u64 bytenr, u64 num_bytes,
					int action, int is_data)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;

	INIT_LIST_HEAD(&head_ref->cluster);
	mutex_init(&head_ref->mutex);

	trace_btrfs_delayed_ref_head(ref, head_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_head_ref(existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
	} else {
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, int level, int action,
					 int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_btrfs_delayed_tree_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
					 struct btrfs_trans_handle *trans,
					 struct btrfs_delayed_ref_node *ref,
					 u64 bytenr, u64 num_bytes, u64 parent,
					 u64 ref_root, u64 owner, u64 offset,
					 int action, int for_cow)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;

	if (need_ref_seq(for_cow, ref_root))
		seq = btrfs_get_tree_mod_seq(fs_info, &trans->delayed_ref_elem);
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_btrfs_delayed_data_ref(ref, full_ref, action);

	existing = tree_insert(&delayed_refs->root, &ref->rb_node);

	if (existing) {
		update_existing_ref(trans, delayed_refs, existing, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
}

/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

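/*
 * Illustrative call (hypothetical values): COWing a level-0 tree block
 * owned by root 5 into a freshly allocated extent would queue roughly
 *
 *	btrfs_add_delayed_tree_ref(fs_info, trans, bytenr, nodesize,
 *				   0, 5, 0, BTRFS_ADD_DELAYED_EXTENT,
 *				   NULL, 0);
 *
 * where parent == 0 makes add_delayed_tree_ref() pick
 * BTRFS_TREE_BLOCK_REF_KEY rather than BTRFS_SHARED_BLOCK_REF_KEY.
 */
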
/*
 * add a delayed data ref. It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int for_cow)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, for_cow);
	spin_unlock(&delayed_refs->lock);
	if (need_ref_seq(for_cow, ref_root))
		btrfs_qgroup_record_ref(trans, &ref->node, extent_op);

	return 0;
}

int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}

/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if one was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
	if (ref)
		return btrfs_delayed_node_to_head(ref);
	return NULL;
}

void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}

int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}

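/*
 * The NULL checks in btrfs_delayed_ref_exit() make this teardown safe to
 * call with only some of the caches created, which is exactly what the
 * fail path above relies on.
 */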