/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#include "ctree.h"
#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16
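/*
 * A rough guide to how these thresholds interact (see
 * btrfs_balance_delayed_items() and btrfs_wq_run_delayed_node() below):
 * once more than BTRFS_DELAYED_BACKGROUND items are pending, async
 * background flushing is kicked off; once BTRFS_DELAYED_WRITEBACK is
 * reached, the caller itself waits until the item count drops or at
 * least BTRFS_DELAYED_BATCH items have been processed.
 */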
static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}
void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
}
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
						struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		/* can be accessed and cached in the inode */
		atomic_add(2, &node->refs);
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
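/*
 * Note on the reference counting used above (a summary of the code, not
 * new behaviour): a delayed node holds one reference for the
 * btrfs_inode->delayed_node cache pointer plus one for each live user,
 * which is why the paths that hand out a freshly cached node do
 * atomic_add(2, &node->refs) while a plain lookup only does a single
 * atomic_inc().
 */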
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	/* cached in the btrfs inode and can be accessed */
	atomic_add(2, &node->refs);

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		spin_unlock(&root->inode_lock);
		kmem_cache_free(delayed_node_cache, node);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
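/*
 * The radix_tree_preload()/radix_tree_insert() pair above is the usual
 * pattern for inserting under a spinlock: preloading happens before
 * root->inode_lock is taken, so the insert itself never needs to
 * allocate.  Losing the insertion race (-EEXIST) simply retries via the
 * "again" label, where btrfs_get_delayed_node() finds the winner's node.
 */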
/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		set_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
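/*
 * Two lists are involved here: n_list threads every delayed node with
 * pending work onto delayed_root->node_list (walked at transaction
 * commit), while p_list puts a node onto delayed_root->prepare_list for
 * the async background worker.  Being on the lists pins one reference,
 * dropped again in btrfs_dequeue_delayed_node().
 */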
/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		clear_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags);
	}
	spin_unlock(&root->lock);
}
static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!test_bit(BTRFS_DELAYED_NODE_IN_LIST, &node->flags)) {
		/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		bool free = false;
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			free = true;
		}
		spin_unlock(&root->inode_lock);
		if (free)
			kmem_cache_free(delayed_node_cache, delayed_node);
	}
}
static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}
static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}
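/*
 * A delayed item is allocated together with its payload: the
 * kmalloc(sizeof(*item) + data_len) above leaves data_len bytes after
 * the struct, reachable through item->data.  Callers such as
 * btrfs_insert_delayed_dir_index() build the on-disk item (e.g. a
 * struct btrfs_dir_item followed by the name) directly in that buffer.
 */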
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item
 * and the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
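/*
 * Items are kept sorted by key in the per-node rb-trees, so the
 * insert/delete batching below can walk them in leaf order.  For dir
 * index insertions, index_cnt is bumped past the highest offset seen,
 * which is what btrfs_inode_delayed_dir_index_count() later reports
 * back to the inode.
 */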
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);

	/*
	 * atomic_dec_return implies a barrier for waitqueue_active
	 */
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}
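/*
 * Waiters are only woken when the pending count drops below
 * BTRFS_DELAYED_BACKGROUND or on every BTRFS_DELAYED_BATCH-th
 * completion (seq % BTRFS_DELAYED_BATCH == 0), so a throttled writer in
 * btrfs_balance_delayed_items() makes progress in batches rather than
 * being woken for every single item.
 */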
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}
static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}
static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}
static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}
static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/* Ok we didn't have space pre-reserved.  This shouldn't happen
		 * too often but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data.  First try to be nice and
		 * reserve something strictly for us.  If not be a pain and try
		 * to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/*
		 * Ok this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have
	 * things migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1
	 * reservation, but we could in fact do this reserve/migrate dance
	 * several times between the time we did the original reservation and
	 * we'd clean it up.  So to take care of this, release the space for
	 * the meta reservation here.  I think it may be time for a
	 * documentation page on how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}
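/*
 * In short, the reservation for a delayed inode update comes from one
 * of three places: a fresh reservation against delayed_block_rsv
 * (joined transactions with no reserved bytes), space migrated from the
 * transaction's own block_rsv, or, in the delalloc case, the
 * pre-reserved delalloc metadata, which is then released again at the
 * end so the accounting stays balanced.
 */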
static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory, but that might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc_array(nitems, sizeof(struct btrfs_key), GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc_array(nitems, sizeof(u32), GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
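/*
 * The batching limit above is pure leaf arithmetic: each candidate item
 * consumes data_len bytes of payload plus sizeof(struct btrfs_item) for
 * its header, and the loop stops as soon as the running total would
 * exceed btrfs_leaf_free_space().  As an illustration only (the real
 * sizes depend on name lengths), with ~16k of free leaf space and dir
 * index items of a few dozen bytes each, a couple hundred consecutive
 * index keys can go into one leaf in a single pass.
 */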
/*
 * This helper handles the simple insertions that don't need to extend
 * the item for new data, such as directory name index insertion and
 * inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node &&
	    test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		BUG_ON(!delayed_node->root);
		clear_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}
static void btrfs_release_delayed_iref(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	ASSERT(delayed_node->root);
	clear_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count--;

	delayed_root = delayed_node->root->fs_info->delayed_root;
	finish_one_item(delayed_root);
}
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int mod;
	int ret;

	key.objectid = node->inode_id;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		mod = -1;
	else
		mod = 1;

	ret = btrfs_lookup_inode(trans, root, path, &key, mod);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);

	if (!test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &node->flags))
		goto no_iref;

	path->slots[0]++;
	if (path->slots[0] >= btrfs_header_nritems(leaf))
		goto search;
again:
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != node->inode_id)
		goto out;

	if (key.type != BTRFS_INODE_REF_KEY &&
	    key.type != BTRFS_INODE_EXTREF_KEY)
		goto out;

	/*
	 * Delayed iref deletion is for the inode who has only one link,
	 * so there is only one iref. The case that several irefs are
	 * in the same item doesn't exist.
	 */
	btrfs_del_item(trans, root, path);
out:
	btrfs_release_delayed_iref(node);
no_iref:
	btrfs_release_path(path);
err_out:
	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return ret;

search:
	btrfs_release_path(path);

	key.type = BTRFS_INODE_EXTREF_KEY;
	key.offset = -1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto err_out;
	ASSERT(ret);

	ret = 0;
	leaf = path->nodes[0];
	path->slots[0]--;
	goto again;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &node->flags)) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}
static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}
int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags))
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}
struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};
static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto free_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_fs_info *fs_info, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
			btrfs_async_run_delayed_root, NULL, NULL);
	async_work->nr = nr;

	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
	return 0;
}
void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}
static int could_end_wait(struct btrfs_delayed_root *delayed_root, int seq)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + BTRFS_DELAYED_BATCH)
		return 1;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 1;

	return 0;
}
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int seq;
		int ret;

		seq = atomic_read(&delayed_root->items_seq);

		ret = btrfs_wq_run_delayed_node(delayed_root, fs_info, 0);
		if (ret)
			return;

		wait_event_interruptible(delayed_root->wait,
					 could_end_wait(delayed_root, seq));
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, fs_info, BTRFS_DELAYED_BATCH);
}
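/*
 * So the throttling policy is: below BTRFS_DELAYED_BACKGROUND items
 * nothing happens; between BACKGROUND and WRITEBACK the work is merely
 * handed to the async worker in BTRFS_DELAYED_BATCH-sized chunks; at
 * WRITEBACK and above the current task blocks until could_end_wait()
 * sees either enough progress (one batch worth of items_seq) or the
 * item count falls back under BACKGROUND.
 */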
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	delayed_item->key.type = BTRFS_DIR_INDEX_KEY;
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	btrfs_set_stack_dir_transid(dir_item, trans->transid);
	btrfs_set_stack_dir_data_len(dir_item, 0);
	btrfs_set_stack_dir_name_len(dir_item, name_len);
	btrfs_set_stack_dir_type(dir_item, type);
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "err add delayed dir index item(name: %.*s) into the insertion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  name_len, name, delayed_node->root->objectid,
			  delayed_node->inode_id, ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	item_key.type = BTRFS_DIR_INDEX_KEY;
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we started a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		btrfs_err(root->fs_info,
			  "err add delayed dir index item(index: %llu) into the deletion tree of the delayed node(root id: %llu, inode id: %llu, errno: %d)",
			  index, node->root->objectid, node->inode_id,
			  ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}
void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}
int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
	}
	return 0;
}
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 */
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
				    struct list_head *ins_list, bool *emitted)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < ctx->pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		ctx->pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = btrfs_stack_dir_name_len(di);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = !dir_emit(ctx, name, name_len,
				 location.objectid, d_type);
		ctx->pos++;

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
		*emitted = true;
	}
	return 0;
}
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->atime,
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->mtime,
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->mtime,
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->ctime,
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->ctime,
				      inode->i_ctime.tv_nsec);

	btrfs_set_stack_timespec_sec(&inode_item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec);
	btrfs_set_stack_timespec_nsec(&inode_item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec);
}
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);

	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_stack_timespec_sec(&inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_stack_timespec_nsec(&inode_item->otime);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	set_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	/*
	 * we don't do delayed inode updates during log recovery because it
	 * leads to enospc problems.  This means we also can't do
	 * delayed inode refs
	 */
	if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
		return -EAGAIN;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/*
	 * We don't reserve space for inode ref deletion because:
	 * - We ONLY do async inode ref deletion for the inode who has only
	 *   one link(i_nlink == 1), it means there is only one inode ref.
	 *   And in most case, the inode ref and the inode item are in the
	 *   same leaf, and we will deal with them at the same time.
	 *   Since we are sure we will reserve the space for the inode item,
	 *   it is unnecessary to reserve space for inode ref deletion.
	 * - If the inode ref and the inode item are not in the same leaf,
	 *   we also needn't worry about enospc problem, because we reserve
	 *   much more space for the inode update than it needs.
	 * - At the worst, we can steal some space from the global reservation.
	 *   It is very rare.
	 */
	mutex_lock(&delayed_node->mutex);
	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		goto release_node;

	set_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags);
	delayed_node->count++;
	atomic_inc(&BTRFS_I(inode)->root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (test_bit(BTRFS_DELAYED_NODE_DEL_IREF, &delayed_node->flags))
		btrfs_release_delayed_iref(delayed_node);

	if (test_bit(BTRFS_DELAYED_NODE_INODE_DIRTY, &delayed_node->flags)) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}
void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}
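/*
 * The gang lookup above processes at most ARRAY_SIZE(delayed_nodes)
 * (here 8) nodes per trip around the loop, restarting the radix tree
 * walk at the last seen inode_id + 1.  References are taken while
 * inode_lock is still held, so a concurrent release can't free a node
 * out from under the kill loop.
 */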
void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}