/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
struct kmem_cache *btrfs_delayed_ref_head_cachep;
struct kmem_cache *btrfs_delayed_tree_ref_cachep;
struct kmem_cache *btrfs_delayed_data_ref_cachep;
struct kmem_cache *btrfs_delayed_extent_op_cachep;
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 */
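/*
 * The bookkeeping here is two-level: delayed_refs->href_root is an rbtree
 * of btrfs_delayed_ref_head entries, one per extent bytenr, and each head
 * carries a ref_list of the individual tree/data back reference updates
 * queued against that extent.  A head's total_ref_mod is the running sum
 * of the updates below it: for example, queueing two adds and one drop
 * for the same extent leaves total_ref_mod at +1.
 */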
/*
 * compare two delayed tree backrefs with same bytenr and type
 */
static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
			  struct btrfs_delayed_tree_ref *ref1, int type)
{
	if (type == BTRFS_TREE_BLOCK_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
/*
 * compare two delayed data backrefs with same bytenr and type
 */
static int comp_data_refs(struct btrfs_delayed_data_ref *ref2,
			  struct btrfs_delayed_data_ref *ref1)
{
	if (ref1->node.type == BTRFS_EXTENT_DATA_REF_KEY) {
		if (ref1->root < ref2->root)
			return -1;
		if (ref1->root > ref2->root)
			return 1;
		if (ref1->objectid < ref2->objectid)
			return -1;
		if (ref1->objectid > ref2->objectid)
			return 1;
		if (ref1->offset < ref2->offset)
			return -1;
		if (ref1->offset > ref2->offset)
			return 1;
	} else {
		if (ref1->parent < ref2->parent)
			return -1;
		if (ref1->parent > ref2->parent)
			return 1;
	}
	return 0;
}
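/*
 * Note the memcmp-style contract shared by both comparators above: a
 * negative/zero/positive return means ref1 sorts before/equal to/after
 * ref2.  Full data backrefs (BTRFS_EXTENT_DATA_REF_KEY) order by
 * (root, objectid, offset); shared backrefs order by parent block only.
 */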
/*
 * entries in the rb tree are ordered by the byte number of the extent,
 * type of the delayed backrefs and content of delayed backrefs.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref2,
		      struct btrfs_delayed_ref_node *ref1,
		      bool compare_seq)
{
	if (ref1->bytenr < ref2->bytenr)
		return -1;
	if (ref1->bytenr > ref2->bytenr)
		return 1;
	if (ref1->is_head && ref2->is_head)
		return 0;
	if (ref2->is_head)
		return -1;
	if (ref1->is_head)
		return 1;
	if (ref1->type < ref2->type)
		return -1;
	if (ref1->type > ref2->type)
		return 1;
	if (ref1->no_quota > ref2->no_quota)
		return 1;
	if (ref1->no_quota < ref2->no_quota)
		return -1;
	/* merging of sequenced refs is not allowed */
	if (compare_seq) {
		if (ref1->seq < ref2->seq)
			return -1;
		if (ref1->seq > ref2->seq)
			return 1;
	}
	if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
	    ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
		return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
				      btrfs_delayed_node_to_tree_ref(ref1),
				      ref1->type);
	} else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
		   ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
		return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
				      btrfs_delayed_node_to_data_ref(ref1));
	}
	BUG();
	return 0;
}
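/*
 * A note on the seq check above: tree_insert() passes compare_seq == 1,
 * so two otherwise identical refs carrying different tree mod log
 * sequence numbers (say seq 10 and seq 20) compare unequal and are kept
 * as distinct nodes.  merge_ref() passes 0 and instead bounds the merge
 * with its own explicit seq test.
 */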
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was
 * properly inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	struct btrfs_delayed_ref_node *ins;
	int cmp;

	ins = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, ins, 1);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/* insert a new ref to head ref rbtree */
static struct btrfs_delayed_ref_head *htree_insert(struct rb_root *root,
						   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_head *entry;
	struct btrfs_delayed_ref_head *ins;
	u64 bytenr;

	ins = rb_entry(node, struct btrfs_delayed_ref_head, href_node);
	bytenr = ins->node.bytenr;
	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_head,
				 href_node);

		if (bytenr < entry->node.bytenr)
			p = &(*p)->rb_left;
		else if (bytenr > entry->node.bytenr)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * find a head entry based on bytenr. This returns the delayed ref
 * head if it was able to find one, or NULL if nothing was in that spot.
 * If return_bigger is given, the next bigger entry is returned if no exact
 * match is found.
 */
static struct btrfs_delayed_ref_head *
find_ref_head(struct rb_root *root, u64 bytenr,
	      int return_bigger)
{
	struct rb_node *n;
	struct btrfs_delayed_ref_head *entry;

	n = root->rb_node;
	entry = NULL;
	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_head, href_node);

		if (bytenr < entry->node.bytenr)
			n = n->rb_left;
		else if (bytenr > entry->node.bytenr)
			n = n->rb_right;
		else
			return entry;
	}
	if (entry && return_bigger) {
		if (bytenr > entry->node.bytenr) {
			n = rb_next(&entry->href_node);
			if (!n)
				n = rb_first(root);
			entry = rb_entry(n, struct btrfs_delayed_ref_head,
					 href_node);
			return entry;
		}
		return entry;
	}
	return NULL;
}
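/*
 * The return_bigger case gives callers a cheap iteration primitive: ask
 * for the first head at or after a starting bytenr and, when the search
 * runs off the end of the tree, wrap around to rb_first().
 * btrfs_select_ref_head() below relies on this to cycle through all the
 * heads over repeated calls.
 */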
int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_head *head)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	assert_spin_locked(&delayed_refs->lock);
	if (mutex_trylock(&head->mutex))
		return 0;

	atomic_inc(&head->node.refs);
	spin_unlock(&delayed_refs->lock);

	mutex_lock(&head->mutex);
	spin_lock(&delayed_refs->lock);
	if (!head->node.in_tree) {
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(&head->node);
		return -EAGAIN;
	}
	btrfs_put_delayed_ref(&head->node);
	return 0;
}
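/*
 * The retry dance above exists because mutex_lock() can sleep and the
 * caller holds the delayed_refs->lock spinlock: on trylock failure we pin
 * the head with a reference, drop the spinlock, sleep on the mutex, and
 * then recheck in_tree, since the head may have been run and removed from
 * the tree while we slept.  -EAGAIN tells the caller to look it up again.
 */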
static inline void drop_delayed_ref(struct btrfs_trans_handle *trans,
				    struct btrfs_delayed_ref_root *delayed_refs,
				    struct btrfs_delayed_ref_head *head,
				    struct btrfs_delayed_ref_node *ref)
{
	if (btrfs_delayed_ref_is_head(ref)) {
		head = btrfs_delayed_node_to_head(ref);
		rb_erase(&head->href_node, &delayed_refs->href_root);
	} else {
		assert_spin_locked(&head->lock);
		list_del(&ref->list);
	}
	ref->in_tree = 0;
	btrfs_put_delayed_ref(ref);
	atomic_dec(&delayed_refs->num_entries);
	if (trans->delayed_ref_updates)
		trans->delayed_ref_updates--;
}
static int merge_ref(struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_root *delayed_refs,
		     struct btrfs_delayed_ref_head *head,
		     struct btrfs_delayed_ref_node *ref, u64 seq)
{
	struct rb_node *node;
	int mod = 0;
	int done = 0;

	node = rb_next(&ref->rb_node);
	while (!done && node) {
		struct btrfs_delayed_ref_node *next;

		next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		node = rb_next(node);
		if (seq && next->seq >= seq)
			break;
		if (comp_entry(ref, next, 0))
			continue;

		if (ref->action == next->action) {
			mod = next->ref_mod;
		} else {
			if (ref->ref_mod < next->ref_mod) {
				struct btrfs_delayed_ref_node *tmp;

				tmp = ref;
				ref = next;
				next = tmp;
				done = 1;
			}
			mod = -next->ref_mod;
		}

		drop_delayed_ref(trans, delayed_refs, head, next);
		ref->ref_mod += mod;
		if (ref->ref_mod == 0) {
			drop_delayed_ref(trans, delayed_refs, head, ref);
			done = 1;
		} else {
			/*
			 * You can't have multiples of the same ref on a tree
			 * block.
			 */
			WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY ||
				ref->type == BTRFS_SHARED_BLOCK_REF_KEY);
		}
	}
	return done;
}
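/*
 * Example of the arithmetic above: an ADD with ref_mod 1 followed by a
 * matching DROP with ref_mod 1 takes the differing-action path with equal
 * ref_mods, so mod = -1, the DROP is dropped, and the ADD's ref_mod falls
 * to zero and is dropped too; the pair cancels without ever touching the
 * extent allocation tree.
 */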
int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info,
			    struct btrfs_delayed_ref_root *delayed_refs,
			    u64 seq)
{
	struct seq_list *elem;
	int ret = 0;

	spin_lock(&fs_info->tree_mod_seq_lock);
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		elem = list_first_entry(&fs_info->tree_mod_seq_list,
					struct seq_list, list);
		if (seq >= elem->seq) {
			pr_debug("holding back delayed_ref %#x.%x, lowest is %#x.%x (%p)\n",
				 (u32)(seq >> 32), (u32)seq,
				 (u32)(elem->seq >> 32), (u32)elem->seq,
				 delayed_refs);
			ret = 1;
		}
	}

	spin_unlock(&fs_info->tree_mod_seq_lock);
	return ret;
}
struct btrfs_delayed_ref_head *
btrfs_select_ref_head(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_head *head;
	u64 start;
	bool loop = false;

	delayed_refs = &trans->transaction->delayed_refs;

again:
	start = delayed_refs->run_delayed_start;
	head = find_ref_head(&delayed_refs->href_root, start, 1);
	if (!head && !loop) {
		delayed_refs->run_delayed_start = 0;
		start = 0;
		loop = true;
		head = find_ref_head(&delayed_refs->href_root, start, 1);
		if (!head)
			return NULL;
	} else if (!head && loop) {
		return NULL;
	}

	while (head->processing) {
		struct rb_node *node;

		node = rb_next(&head->href_node);
		if (!node) {
			if (loop)
				return NULL;
			delayed_refs->run_delayed_start = 0;
			start = 0;
			loop = true;
			goto again;
		}
		head = rb_entry(node, struct btrfs_delayed_ref_head,
				href_node);
	}

	head->processing = 1;
	WARN_ON(delayed_refs->num_heads_ready == 0);
	delayed_refs->num_heads_ready--;
	delayed_refs->run_delayed_start = head->node.bytenr +
		head->node.num_bytes;
	return head;
}
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * bytenr and parent
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_head *head,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	if (update->action != existing->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0)
			drop_delayed_ref(trans, delayed_refs, head, existing);
		else
			WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
				existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
	} else {
		WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY ||
			existing->type == BTRFS_SHARED_BLOCK_REF_KEY);
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}
/*
 * Helper to insert the ref_node to the tail or merge with tail.
 *
 * Return 0 for insert.
 * Return >0 for merge.
 */
static int
add_delayed_ref_tail_merge(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_root *root,
			   struct btrfs_delayed_ref_head *href,
			   struct btrfs_delayed_ref_node *ref)
{
	struct btrfs_delayed_ref_node *exist;
	int mod;
	int ret = 0;

	spin_lock(&href->lock);
	/* Check whether we can merge the tail node with ref */
	if (list_empty(&href->ref_list))
		goto add_tail;
	exist = list_entry(href->ref_list.prev, struct btrfs_delayed_ref_node,
			   list);
	/* No need to compare bytenr nor is_head */
	if (exist->type != ref->type || exist->no_quota != ref->no_quota ||
	    exist->seq != ref->seq)
		goto add_tail;

	if ((exist->type == BTRFS_TREE_BLOCK_REF_KEY ||
	     exist->type == BTRFS_SHARED_BLOCK_REF_KEY) &&
	    comp_tree_refs(btrfs_delayed_node_to_tree_ref(exist),
			   btrfs_delayed_node_to_tree_ref(ref),
			   ref->type))
		goto add_tail;
	if ((exist->type == BTRFS_EXTENT_DATA_REF_KEY ||
	     exist->type == BTRFS_SHARED_DATA_REF_KEY) &&
	    comp_data_refs(btrfs_delayed_node_to_data_ref(exist),
			   btrfs_delayed_node_to_data_ref(ref)))
		goto add_tail;

	/* Now we are sure we can merge */
	ret = 1;
	if (exist->action == ref->action) {
		mod = ref->ref_mod;
	} else {
		/* Need to change action */
		if (exist->ref_mod < ref->ref_mod) {
			exist->action = ref->action;
			mod = -exist->ref_mod;
			exist->ref_mod = ref->ref_mod;
		} else
			mod = -ref->ref_mod;
	}
	exist->ref_mod += mod;

	/* remove existing tail if its ref_mod is zero */
	if (exist->ref_mod == 0)
		drop_delayed_ref(trans, root, href, exist);
	spin_unlock(&href->lock);
	return ret;

add_tail:
	list_add_tail(&ref->list, &href->ref_list);
	atomic_inc(&root->num_entries);
	trans->delayed_ref_updates++;
	spin_unlock(&href->lock);
	return ret;
}
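/*
 * Merging only against the current list tail keeps insertion O(1); a new
 * ref that could combine with an older entry further up the list is left
 * alone here and handled later by the merge machinery (see merge_ref()
 * above) or when the head is run.
 */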
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_root *delayed_refs,
			 struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;
	int old_ref_mod;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);
	BUG_ON(existing_ref->is_data != ref->is_data);

	spin_lock(&existing_ref->lock);
	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;

	}

	if (ref->extent_op) {
		if (!existing_ref->extent_op) {
			existing_ref->extent_op = ref->extent_op;
		} else {
			if (ref->extent_op->update_key) {
				memcpy(&existing_ref->extent_op->key,
				       &ref->extent_op->key,
				       sizeof(ref->extent_op->key));
				existing_ref->extent_op->update_key = 1;
			}
			if (ref->extent_op->update_flags) {
				existing_ref->extent_op->flags_to_set |=
					ref->extent_op->flags_to_set;
				existing_ref->extent_op->update_flags = 1;
			}
			btrfs_free_delayed_extent_op(ref->extent_op);
		}
	}
	/*
	 * update the reference mod on the head to reflect this new operation,
	 * only need the lock for this case cause we could be processing it
	 * currently, for refs we just added we know we're a-ok.
	 */
	old_ref_mod = existing_ref->total_ref_mod;
	existing->ref_mod += update->ref_mod;
	existing_ref->total_ref_mod += update->ref_mod;

	/*
	 * If we are going from a positive ref mod to a negative or vice
	 * versa we need to make sure to adjust pending_csums accordingly.
	 */
	if (existing_ref->is_data) {
		if (existing_ref->total_ref_mod >= 0 && old_ref_mod < 0)
			delayed_refs->pending_csums -= existing->num_bytes;
		if (existing_ref->total_ref_mod < 0 && old_ref_mod >= 0)
			delayed_refs->pending_csums += existing->num_bytes;
	}
	spin_unlock(&existing_ref->lock);
}
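/*
 * pending_csums tracks the bytes of data extents whose head currently has
 * a negative total_ref_mod, i.e. extents we expect to free, since freeing
 * them will also require deleting csum items.  That is why the sign flips
 * above move existing->num_bytes in and out of the counter, matching the
 * "is_data && count_mod < 0" accounting in add_delayed_ref_head() below.
 */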
/*
 * helper function to actually insert a head node into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count.
 */
static noinline struct btrfs_delayed_ref_head *
add_delayed_ref_head(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, int action, int is_data)
{
	struct btrfs_delayed_ref_head *existing;
	struct btrfs_delayed_ref_head *head_ref = NULL;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (action == BTRFS_UPDATE_DELAYED_HEAD)
		count_mod = 0;
	else if (action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT)
		must_insert_reserved = 1;
	else
		must_insert_reserved = 0;

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = count_mod;
	ref->type = 0;
	ref->action = 0;
	ref->is_head = 1;
	ref->in_tree = 1;
	ref->seq = 0;

	head_ref = btrfs_delayed_node_to_head(ref);
	head_ref->must_insert_reserved = must_insert_reserved;
	head_ref->is_data = is_data;
	INIT_LIST_HEAD(&head_ref->ref_list);
	head_ref->processing = 0;
	head_ref->total_ref_mod = count_mod;

	spin_lock_init(&head_ref->lock);
	mutex_init(&head_ref->mutex);

	trace_add_delayed_ref_head(ref, head_ref, action);

	existing = htree_insert(&delayed_refs->href_root,
				&head_ref->href_node);
	if (existing) {
		update_existing_head_ref(delayed_refs, &existing->node, ref);
		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kmem_cache_free(btrfs_delayed_ref_head_cachep, head_ref);
		head_ref = existing;
	} else {
		if (is_data && count_mod < 0)
			delayed_refs->pending_csums += num_bytes;
		delayed_refs->num_heads++;
		delayed_refs->num_heads_ready++;
		atomic_inc(&delayed_refs->num_entries);
		trans->delayed_ref_updates++;
	}
	return head_ref;
}
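/*
 * count_mod summary for the function above: BTRFS_ADD_DELAYED_REF and
 * BTRFS_ADD_DELAYED_EXTENT contribute +1, BTRFS_DROP_DELAYED_REF
 * contributes -1, and BTRFS_UPDATE_DELAYED_HEAD contributes 0 because it
 * only attaches an extent_op and must not disturb the reference count.
 */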
/*
 * helper to insert a delayed tree ref into the rbtree.
 */
static noinline void
add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, int level,
		     int action, int no_quota)
{
	struct btrfs_delayed_tree_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);
	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->no_quota = no_quota;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_tree_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
	else
		ref->type = BTRFS_TREE_BLOCK_REF_KEY;
	full_ref->level = level;

	trace_add_delayed_tree_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	/*
	 * XXX: memory should be freed at the same level it was allocated.
	 * But this bad practice exists elsewhere too; follow it for now.
	 * Needs cleanup.
	 */
	if (ret > 0)
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, full_ref);
}
/*
 * helper to insert a delayed data ref into the rbtree.
 */
static noinline void
add_delayed_data_ref(struct btrfs_fs_info *fs_info,
		     struct btrfs_trans_handle *trans,
		     struct btrfs_delayed_ref_head *head_ref,
		     struct btrfs_delayed_ref_node *ref, u64 bytenr,
		     u64 num_bytes, u64 parent, u64 ref_root, u64 owner,
		     u64 offset, int action, int no_quota)
{
	struct btrfs_delayed_data_ref *full_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	u64 seq = 0;
	int ret;

	if (action == BTRFS_ADD_DELAYED_EXTENT)
		action = BTRFS_ADD_DELAYED_REF;

	delayed_refs = &trans->transaction->delayed_refs;

	if (is_fstree(ref_root))
		seq = atomic64_read(&fs_info->tree_mod_seq);

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->num_bytes = num_bytes;
	ref->ref_mod = 1;
	ref->action = action;
	ref->is_head = 0;
	ref->in_tree = 1;
	ref->no_quota = no_quota;
	ref->seq = seq;

	full_ref = btrfs_delayed_node_to_data_ref(ref);
	full_ref->parent = parent;
	full_ref->root = ref_root;
	if (parent)
		ref->type = BTRFS_SHARED_DATA_REF_KEY;
	else
		ref->type = BTRFS_EXTENT_DATA_REF_KEY;

	full_ref->objectid = owner;
	full_ref->offset = offset;

	trace_add_delayed_data_ref(ref, full_ref, action);

	ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);

	if (ret > 0)
		kmem_cache_free(btrfs_delayed_data_ref_cachep, full_ref);
}
/*
 * add a delayed tree ref.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 ref_root, int level, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int no_quota)
{
	struct btrfs_delayed_tree_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		no_quota = 0;

	BUG_ON(extent_op && extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_tree_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 0);

	add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, level, action,
			     no_quota);
	spin_unlock(&delayed_refs->lock);

	return 0;
}
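/*
 * A sketch of a typical call, e.g. queueing the backref for a freshly
 * allocated tree block (the real call sites live in extent-tree.c and may
 * differ in detail):
 *
 *	ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, buf->start,
 *					 buf->len, parent, root_objectid,
 *					 level, BTRFS_ADD_DELAYED_EXTENT,
 *					 extent_op, 0);
 */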
/*
 * add a delayed data ref.  It's similar to btrfs_add_delayed_tree_ref.
 */
int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
			       struct btrfs_trans_handle *trans,
			       u64 bytenr, u64 num_bytes,
			       u64 parent, u64 ref_root,
			       u64 owner, u64 offset, int action,
			       struct btrfs_delayed_extent_op *extent_op,
			       int no_quota)
{
	struct btrfs_delayed_data_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	if (!is_fstree(ref_root) || !fs_info->quota_enabled)
		no_quota = 0;

	BUG_ON(extent_op && !extent_op->is_data);
	ref = kmem_cache_alloc(btrfs_delayed_data_ref_cachep, GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref) {
		kmem_cache_free(btrfs_delayed_data_ref_cachep, ref);
		return -ENOMEM;
	}

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	head_ref = add_delayed_ref_head(fs_info, trans, &head_ref->node,
					bytenr, num_bytes, action, 1);

	add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr,
			     num_bytes, parent, ref_root, owner, offset,
			     action, no_quota);
	spin_unlock(&delayed_refs->lock);

	return 0;
}
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
				struct btrfs_trans_handle *trans,
				u64 bytenr, u64 num_bytes,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;

	head_ref = kmem_cache_alloc(btrfs_delayed_ref_head_cachep, GFP_NOFS);
	if (!head_ref)
		return -ENOMEM;

	head_ref->extent_op = extent_op;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
			     num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
			     extent_op->is_data);

	spin_unlock(&delayed_refs->lock);
	return 0;
}
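/*
 * Unlike the two helpers above, this queues no tree or data ref at all:
 * it reuses the head machinery with BTRFS_UPDATE_DELAYED_HEAD (count_mod
 * of 0) purely so the attached extent_op gets applied when the head is
 * run.
 */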
/*
 * this does a simple search for the head node for a given extent.
 * It must be called with the delayed ref spinlock held, and it returns
 * the head node if any was found, or NULL if not.
 */
struct btrfs_delayed_ref_head *
btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_root *delayed_refs;

	delayed_refs = &trans->transaction->delayed_refs;
	return find_ref_head(&delayed_refs->href_root, bytenr, 0);
}
void btrfs_delayed_ref_exit(void)
{
	if (btrfs_delayed_ref_head_cachep)
		kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
	if (btrfs_delayed_tree_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_tree_ref_cachep);
	if (btrfs_delayed_data_ref_cachep)
		kmem_cache_destroy(btrfs_delayed_data_ref_cachep);
	if (btrfs_delayed_extent_op_cachep)
		kmem_cache_destroy(btrfs_delayed_extent_op_cachep);
}
int btrfs_delayed_ref_init(void)
{
	btrfs_delayed_ref_head_cachep = kmem_cache_create(
				"btrfs_delayed_ref_head",
				sizeof(struct btrfs_delayed_ref_head), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_ref_head_cachep)
		goto fail;

	btrfs_delayed_tree_ref_cachep = kmem_cache_create(
				"btrfs_delayed_tree_ref",
				sizeof(struct btrfs_delayed_tree_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_tree_ref_cachep)
		goto fail;

	btrfs_delayed_data_ref_cachep = kmem_cache_create(
				"btrfs_delayed_data_ref",
				sizeof(struct btrfs_delayed_data_ref), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_data_ref_cachep)
		goto fail;

	btrfs_delayed_extent_op_cachep = kmem_cache_create(
				"btrfs_delayed_extent_op",
				sizeof(struct btrfs_delayed_extent_op), 0,
				SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_delayed_extent_op_cachep)
		goto fail;

	return 0;
fail:
	btrfs_delayed_ref_exit();
	return -ENOMEM;
}