/*
 * Copyright (C) 2009 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/sort.h>
#include <linux/ftrace.h>
#include "ctree.h"
#include "delayed-ref.h"
#include "transaction.h"
/*
 * delayed back reference update tracking.  For subvolume trees
 * we queue up extent allocations and backref maintenance for
 * delayed processing.  This avoids deep call chains where we
 * add extents in the middle of btrfs_search_slot, and it allows
 * us to buffer up frequently modified backrefs in an rb tree instead
 * of hammering updates on the extent allocation tree.
 *
 * Right now this code is only used for reference counted trees, but
 * the long term goal is to get rid of the similar code for delayed
 * extent tree modifications.
 */
/*
 * entries in the rb tree are ordered by the byte number of the extent
 * and by the byte number of the parent block.
 */
static int comp_entry(struct btrfs_delayed_ref_node *ref,
		      u64 bytenr, u64 parent)
{
	if (bytenr < ref->bytenr)
		return -1;
	if (bytenr > ref->bytenr)
		return 1;
	if (parent < ref->parent)
		return -1;
	if (parent > ref->parent)
		return 1;
	return 0;
}
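
/*
 * A head node is stored with parent == (u64)-1, so comp_entry() sorts it
 * after every real (bytenr, parent) pair for the same extent.  This is
 * why the lookups below search for (bytenr, (u64)-1) to land on the head
 * and then use rb_prev() to walk backwards into the individual refs
 * queued for that extent.
 */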
/*
 * insert a new ref into the rbtree.  This returns any existing refs
 * for the same (bytenr,parent) tuple, or NULL if the new node was properly
 * inserted.
 */
static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
						  u64 bytenr, u64 parent,
						  struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent_node = NULL;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (*p) {
		parent_node = *p;
		entry = rb_entry(parent_node, struct btrfs_delayed_ref_node,
				 rb_node);

		cmp = comp_entry(entry, bytenr, parent);
		if (cmp < 0)
			p = &(*p)->rb_left;
		else if (cmp > 0)
			p = &(*p)->rb_right;
		else
			return entry;
	}

	entry = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	return NULL;
}
/*
 * find an entry based on (bytenr,parent).  This returns the delayed
 * ref if it was able to find one, or NULL if nothing was in that spot
 */
static struct btrfs_delayed_ref_node *tree_search(struct rb_root *root,
						  u64 bytenr, u64 parent)
{
	struct rb_node *n = root->rb_node;
	struct btrfs_delayed_ref_node *entry;
	int cmp;

	while (n) {
		entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
		WARN_ON(!entry->in_tree);

		cmp = comp_entry(entry, bytenr, parent);
		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}
/*
 * Locking on delayed refs is done by taking a lock on the head node,
 * which has the (impossible) parent id of (u64)-1.  Once a lock is held
 * on the head node, you're allowed (and required) to process all the
 * delayed refs for a given byte number in the tree.
 *
 * This will walk forward in the rbtree until it finds a head node it
 * is able to lock.  It might not lock the delayed ref you asked for,
 * and so it will return the one it did lock in next_ret and return 0.
 *
 * If no locks are taken, next_ret is set to NULL and 1 is returned.  This
 * means there are no more unlocked head nodes in the rbtree.
 */
int btrfs_lock_delayed_ref(struct btrfs_trans_handle *trans,
			   struct btrfs_delayed_ref_node *ref,
			   struct btrfs_delayed_ref_head **next_ret)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_head *head;
	int ret = 0;

	while (1) {
		if (btrfs_delayed_ref_is_head(ref)) {
			head = btrfs_delayed_node_to_head(ref);
			if (mutex_trylock(&head->mutex)) {
				*next_ret = head;
				ret = 0;
				break;
			}
		}
		node = rb_next(&ref->rb_node);
		if (!node) {
			ret = 1;
			*next_ret = NULL;
			break;
		}
		ref = rb_entry(node, struct btrfs_delayed_ref_node,
			       rb_node);
	}
	return ret;
}
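
/*
 * A minimal usage sketch (guarded out, not part of this file's API;
 * run_one_head() is a hypothetical helper): start from the first node in
 * the rbtree, lock whichever head btrfs_lock_delayed_ref() manages to
 * grab, process it, and drop head->mutex when done.
 */
#if 0
static void run_all_heads(struct btrfs_trans_handle *trans)
{
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;
	struct rb_node *node;

	delayed_refs = &trans->transaction->delayed_refs;
	while (1) {
		spin_lock(&delayed_refs->lock);
		node = rb_first(&delayed_refs->root);
		if (!node) {
			spin_unlock(&delayed_refs->lock);
			break;
		}
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
		if (btrfs_lock_delayed_ref(trans, ref, &head)) {
			/* every remaining head is locked by someone else */
			spin_unlock(&delayed_refs->lock);
			break;
		}
		spin_unlock(&delayed_refs->lock);
		run_one_head(trans, head);	/* hypothetical */
		mutex_unlock(&head->mutex);
	}
}
#endif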
/*
 * This checks to see if there are any delayed refs in the
 * btree for a given bytenr.  It returns one if it finds any
 * and zero otherwise.
 *
 * If it only finds a head node, it returns 0.
 *
 * The idea is to use this when deciding if you can safely delete an
 * extent from the extent allocation tree.  There may be a pending
 * ref in the rbtree that adds or removes references, so as long as this
 * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent
 * allocation tree.
 */
int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct rb_node *prev_node;
	int ret = 0;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/* the head sorts last for this bytenr, so start there */
	ref = tree_search(&delayed_refs->root, bytenr, (u64)-1);
	if (ref) {
		prev_node = rb_prev(&ref->rb_node);
		if (!prev_node)
			goto out;
		ref = rb_entry(prev_node, struct btrfs_delayed_ref_node,
			       rb_node);
		if (ref->bytenr == bytenr)
			ret = 1;
	}
out:
	spin_unlock(&delayed_refs->lock);
	return ret;
}
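
/*
 * A minimal caller sketch (guarded out; remove_extent_item() is a
 * hypothetical helper): only drop the BTRFS_EXTENT_ITEM once no delayed
 * ref for this bytenr is still queued.
 */
#if 0
	if (!btrfs_delayed_ref_pending(trans, bytenr))
		remove_extent_item(trans, extent_root, bytenr);	/* hypothetical */
#endif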
/*
 * helper function to lookup reference count
 *
 * the head node for delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  This way you
 * can check to see what the reference count would be if all of the
 * delayed refs are processed.
 */
int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, u64 bytenr,
			    u64 num_bytes, u32 *refs)
{
	struct btrfs_delayed_ref_node *ref;
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_key key;
	u32 num_refs;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	delayed_refs = &trans->transaction->delayed_refs;
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0) {
		btrfs_free_path(path);
		return ret;
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		ei = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_extent_item);
		num_refs = btrfs_extent_refs(leaf, ei);
	} else {
		num_refs = 0;
		ret = 0;
	}

	spin_lock(&delayed_refs->lock);
	ref = tree_search(&delayed_refs->root, bytenr, (u64)-1);
	if (ref) {
		head = btrfs_delayed_node_to_head(ref);
		if (mutex_trylock(&head->mutex)) {
			num_refs += ref->ref_mod;
			mutex_unlock(&head->mutex);
			*refs = num_refs;
			goto out;
		}

		/*
		 * the head is locked: whoever holds it is running the
		 * delayed refs right now.  Hold a reference, drop our
		 * locks, and block on the mutex so we retry against a
		 * stable sum.
		 */
		atomic_inc(&ref->refs);
		spin_unlock(&delayed_refs->lock);

		btrfs_release_path(root->fs_info->extent_root, path);

		mutex_lock(&head->mutex);
		mutex_unlock(&head->mutex);
		btrfs_put_delayed_ref(ref);
		goto again;
	} else {
		*refs = num_refs;
	}
out:
	spin_unlock(&delayed_refs->lock);
	btrfs_free_path(path);
	return ret;
}
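
/*
 * A minimal caller sketch (guarded out): read the reference count an
 * extent would have once every queued delayed ref is applied.
 */
#if 0
	u32 refs;

	if (btrfs_lookup_extent_ref(trans, root, bytenr, num_bytes, &refs) == 0 &&
	    refs == 0) {
		/* nothing will reference this extent once the queue drains */
	}
#endif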
/*
 * helper function to update an extent delayed ref in the
 * rbtree.  existing and update must both have the same
 * (bytenr, parent) pair.
 *
 * This may free existing if the update cancels out whatever
 * operation it was doing.
 */
static noinline void
update_existing_ref(struct btrfs_trans_handle *trans,
		    struct btrfs_delayed_ref_root *delayed_refs,
		    struct btrfs_delayed_ref_node *existing,
		    struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref *existing_ref;
	struct btrfs_delayed_ref *ref;

	existing_ref = btrfs_delayed_node_to_ref(existing);
	ref = btrfs_delayed_node_to_ref(update);

	if (ref->pin)
		existing_ref->pin = 1;

	if (ref->action != existing_ref->action) {
		/*
		 * this is effectively undoing either an add or a
		 * drop.  We decrement the ref_mod, and if it goes
		 * down to zero we just delete the entry without
		 * ever changing the extent allocation tree.
		 */
		existing->ref_mod--;
		if (existing->ref_mod == 0) {
			rb_erase(&existing->rb_node,
				 &delayed_refs->root);
			existing->in_tree = 0;
			btrfs_put_delayed_ref(existing);
			delayed_refs->num_entries--;
			if (trans->delayed_ref_updates)
				trans->delayed_ref_updates--;
		}
	} else {
		if (existing_ref->action == BTRFS_ADD_DELAYED_REF) {
			/* if we're adding refs, make sure all the
			 * details match up.  The extent could
			 * have been totally freed and reallocated
			 * by a different owner before the delayed
			 * ref entries were removed.
			 */
			existing_ref->owner_objectid = ref->owner_objectid;
			existing_ref->generation = ref->generation;
			existing_ref->root = ref->root;
			existing->num_bytes = update->num_bytes;
		}
		/*
		 * the action on the existing ref matches
		 * the action on the ref we're trying to add.
		 * Bump the ref_mod by one so the backref that
		 * is eventually added/removed has the correct
		 * reference count
		 */
		existing->ref_mod += update->ref_mod;
	}
}
/*
 * helper function to update the accounting in the head ref
 * existing and update must have the same bytenr
 */
static noinline void
update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
			 struct btrfs_delayed_ref_node *update)
{
	struct btrfs_delayed_ref_head *existing_ref;
	struct btrfs_delayed_ref_head *ref;

	existing_ref = btrfs_delayed_node_to_head(existing);
	ref = btrfs_delayed_node_to_head(update);

	if (ref->must_insert_reserved) {
		/* if the extent was freed and then
		 * reallocated before the delayed ref
		 * entries were processed, we can end up
		 * with an existing head ref without
		 * the must_insert_reserved flag set.
		 * Set it again here
		 */
		existing_ref->must_insert_reserved = ref->must_insert_reserved;

		/*
		 * update the num_bytes so we make sure the accounting
		 * is done correctly
		 */
		existing->num_bytes = update->num_bytes;
	}

	/*
	 * update the reference mod on the head to reflect this new operation
	 */
	existing->ref_mod += update->ref_mod;
}
/*
 * helper function to actually insert a delayed ref into the rbtree.
 * this does all the dirty work in terms of maintaining the correct
 * overall modification count in the head node and properly dealing
 * with updating existing nodes as new modifications are queued.
 */
static noinline int __btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
			  struct btrfs_delayed_ref_node *ref,
			  u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
			  u64 ref_generation, u64 owner_objectid, int action,
			  int pin)
{
	struct btrfs_delayed_ref_node *existing;
	struct btrfs_delayed_ref *full_ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int count_mod = 1;
	int must_insert_reserved = 0;

	/*
	 * the head node stores the sum of all the mods, so dropping a ref
	 * should drop the sum in the head node by one.
	 */
	if (parent == (u64)-1 && action == BTRFS_DROP_DELAYED_REF)
		count_mod = -1;

	/*
	 * BTRFS_ADD_DELAYED_EXTENT means that we need to update
	 * the reserved accounting when the extent is finally added, or
	 * if a later modification deletes the delayed ref without ever
	 * inserting the extent into the extent allocation tree.
	 * ref->must_insert_reserved is the flag used to record
	 * that accounting mods are required.
	 *
	 * Once we record must_insert_reserved, switch the action to
	 * BTRFS_ADD_DELAYED_REF because other special casing is not required.
	 */
	if (action == BTRFS_ADD_DELAYED_EXTENT) {
		must_insert_reserved = 1;
		action = BTRFS_ADD_DELAYED_REF;
	} else {
		must_insert_reserved = 0;
	}

	delayed_refs = &trans->transaction->delayed_refs;

	/* first set the basic ref node struct up */
	atomic_set(&ref->refs, 1);
	ref->bytenr = bytenr;
	ref->parent = parent;
	ref->ref_mod = count_mod;
	ref->in_tree = 1;
	ref->num_bytes = num_bytes;

	if (btrfs_delayed_ref_is_head(ref)) {
		head_ref = btrfs_delayed_node_to_head(ref);
		head_ref->must_insert_reserved = must_insert_reserved;
		mutex_init(&head_ref->mutex);
	} else {
		full_ref = btrfs_delayed_node_to_ref(ref);
		full_ref->root = ref_root;
		full_ref->generation = ref_generation;
		full_ref->owner_objectid = owner_objectid;
		full_ref->pin = pin;
		full_ref->action = action;
	}

	existing = tree_insert(&delayed_refs->root, bytenr,
			       parent, &ref->rb_node);

	if (existing) {
		if (btrfs_delayed_ref_is_head(ref))
			update_existing_head_ref(existing, ref);
		else
			update_existing_ref(trans, delayed_refs,
					    existing, ref);

		/*
		 * we've updated the existing ref, free the newly
		 * allocated ref
		 */
		kfree(ref);
	} else {
		delayed_refs->num_entries++;
		trans->delayed_ref_updates++;
	}
	return 0;
}
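
/*
 * Worked example of the head accounting above: queueing two ref adds and
 * one ref drop for the same bytenr leaves the head node with a ref_mod of
 * 1 + 1 - 1 = 1, so btrfs_lookup_extent_ref() reports the on-disk count
 * plus one -- exactly what the count will be after the queue is run.
 */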
/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 */
int btrfs_add_delayed_ref(struct btrfs_trans_handle *trans,
			  u64 bytenr, u64 num_bytes, u64 parent, u64 ref_root,
			  u64 ref_generation, u64 owner_objectid, int action,
			  int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via a add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
				      (u64)-1, 0, 0, 0, action, pin);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, action, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
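
/*
 * A minimal caller sketch (guarded out; bytenr, num_bytes, parent and
 * owner_objectid are placeholders): queue one backref add for an extent
 * owned by the current root.
 */
#if 0
	ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent,
				    root->root_key.objectid, trans->transid,
				    owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);
#endif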
/*
 * add a delayed ref to the tree.  This does all of the accounting required
 * to make sure the delayed ref is eventually processed before this
 * transaction commits.
 *
 * The main point of this call is to add and remove a backreference in a single
 * shot, taking the lock only once, and only searching for the head node once.
 *
 * It is the same as doing a ref add and delete in two separate calls.
 */
int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans,
			     u64 bytenr, u64 num_bytes, u64 orig_parent,
			     u64 parent, u64 orig_ref_root, u64 ref_root,
			     u64 orig_ref_generation, u64 ref_generation,
			     u64 owner_objectid, int pin)
{
	struct btrfs_delayed_ref *ref;
	struct btrfs_delayed_ref *old_ref;
	struct btrfs_delayed_ref_head *head_ref;
	struct btrfs_delayed_ref_root *delayed_refs;
	int ret;

	ref = kmalloc(sizeof(*ref), GFP_NOFS);
	if (!ref)
		return -ENOMEM;

	old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS);
	if (!old_ref) {
		kfree(ref);
		return -ENOMEM;
	}

	/*
	 * the parent = 0 case comes from cases where we don't actually
	 * know the parent yet.  It will get updated later via a add/drop
	 * pair.
	 */
	if (parent == 0)
		parent = bytenr;
	if (orig_parent == 0)
		orig_parent = bytenr;

	head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS);
	if (!head_ref) {
		kfree(ref);
		kfree(old_ref);
		return -ENOMEM;
	}

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);

	/*
	 * insert both the head node and the new ref without dropping
	 * the spin lock
	 */
	ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes,
				      (u64)-1, 0, 0, 0,
				      BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes,
				      parent, ref_root, ref_generation,
				      owner_objectid, BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);

	ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes,
				      orig_parent, orig_ref_root,
				      orig_ref_generation, owner_objectid,
				      BTRFS_DROP_DELAYED_REF, pin);
	BUG_ON(ret);
	spin_unlock(&delayed_refs->lock);
	return 0;
}
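
/*
 * An equivalence sketch (guarded out): the single call above does the
 * same work as this add/drop pair, but with one lock acquisition and one
 * head-node lookup instead of two.
 */
#if 0
	ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, parent,
				    ref_root, ref_generation, owner_objectid,
				    BTRFS_ADD_DELAYED_REF, 0);
	BUG_ON(ret);
	ret = btrfs_add_delayed_ref(trans, bytenr, num_bytes, orig_parent,
				    orig_ref_root, orig_ref_generation,
				    owner_objectid, BTRFS_DROP_DELAYED_REF,
				    pin);
	BUG_ON(ret);
#endif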