/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"
#define BTRFS_DELAYED_WRITEBACK		400
#define BTRFS_DELAYED_BACKGROUND	100
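/*
 * Note on the two thresholds above: once the number of pending delayed
 * items crosses BTRFS_DELAYED_BACKGROUND, background flushing is kicked
 * off; btrfs_balance_delayed_items() below additionally makes the caller
 * wait once BTRFS_DELAYED_WRITEBACK is reached.
 */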
static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}
static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}

static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}

static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
							struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}
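/*
 * Look up the delayed node cached in the btrfs inode, falling back to the
 * per-root radix tree. A reference is taken on the returned node; the
 * caller must drop it with btrfs_release_delayed_node().
 */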
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
/*
 * Call it when holding delayed_node->mutex
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}
/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}
struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
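/*
 * Drop a reference on a delayed node, requeueing it first if it still has
 * pending items. The node is only freed after the refcount is rechecked as
 * zero under root->inode_lock, which closes the race with a concurrent
 * btrfs_get_delayed_node() that may have just found it in the radix tree.
 */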
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}
struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}
/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @root:  the rb-root of the delayed node's insertion or deletion tree
 * @key:   the key to look up
 * @prev:  used to store the prev item if the right item isn't found
 * @next:  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, NULL);
	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}
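/*
 * Link a delayed item into the node's insertion or deletion rb-tree, keyed
 * and ordered by btrfs_comp_cpu_keys(). For dir index insertions this also
 * bumps index_cnt so the next allocated index stays unique.
 */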
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;
	if (atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}
struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
						   u64 root_id)
{
	struct btrfs_key root_key;

	if (root->objectid == root_id)
		return root;

	root_key.objectid = root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/* Ok we didn't have space pre-reserved.  This shouldn't happen
		 * too often but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data.  First try to be nice and
		 * reserve something strictly for us.  If not be a pain and try
		 * to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/*
		 * Ok this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		WARN_ON(1);
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have things
	 * migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1 reservation,
	 * but we could in fact do this reserve/migrate dance several times
	 * between the time we did the original reservation and we'd clean it
	 * up.  So to take care of this, release the space for the meta
	 * reservation here.  I think it may be time for a documentation page on
	 * how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}
static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
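/*
 * Example of the batching arithmetic used below (illustrative numbers only):
 * with free_space = 1000 bytes and continuous dir index items of data_len 30,
 * each item consumes data_len + sizeof(struct btrfs_item) in the leaf, so
 * the loop keeps admitting items while the running total_size fits in the
 * leaf's free space.
 */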
/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of the continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(trans, root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
/*
 * This helper can just do simple insertion that needn't extend item for new
 * data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	item = btrfs_item_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}
/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(trans, root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		if (atomic_dec_return(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND &&
		    waitqueue_active(&delayed_root->wait))
			wake_up(&delayed_root->wait);
	}
}
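/*
 * Copy the in-memory inode_item of the delayed node over the corresponding
 * btrfs_inode_item in the tree, then release the metadata reservation held
 * for this deferred update.
 */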
static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;

	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return 0;
}
static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty)
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}
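/*
 * Work item handed to the delayed_workers queue; it carries one prepared
 * delayed node that the worker flushes in the background.
 */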
struct btrfs_async_delayed_node {
	struct btrfs_root *root;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_work work;
};
static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int need_requeue = 0;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	path->leave_spinning = 1;

	delayed_node = async_node->delayed_node;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto free_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between delayed items balance and the worker.
	 * The race like this:
	 * 	Task1				Worker thread
	 * 					count == 0, needn't requeue
	 * 					  also needn't insert the
	 * 					  delayed node into prepare
	 * 					  list again.
	 * 	add lots of delayed items
	 * 	queue the delayed node
	 * 	  already in the list,
	 * 	  and not in the prepare
	 * 	  list, it means the delayed
	 * 	  node is being dealt with
	 * 	  by the worker.
	 * 	do delayed items balance
	 * 	  the delayed node is being
	 * 	  dealt with by the worker
	 * 	  now, just wait.
	 * 					the worker goto idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		need_requeue = 1;
	else
		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
					   delayed_node);
	mutex_unlock(&delayed_node->mutex);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);
free_path:
	btrfs_free_path(path);
out:
	if (need_requeue)
		btrfs_requeue_work(&async_node->work);
	else {
		btrfs_release_prepared_delayed_node(delayed_node);
		kfree(async_node);
	}
}
static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int all)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_delayed_node *curr;
	int count = 0;

again:
	curr = btrfs_first_prepared_delayed_node(delayed_root);
	if (!curr)
		return 0;

	async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
	if (!async_node) {
		btrfs_release_prepared_delayed_node(curr);
		return -ENOMEM;
	}

	async_node->root = root;
	async_node->delayed_node = curr;

	async_node->work.func = btrfs_async_run_delayed_node_done;
	async_node->work.flags = 0;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
	count++;

	if (all || count < 4)
		goto again;

	return 0;
}
void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}
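/*
 * Throttle delayed-item producers: below BTRFS_DELAYED_BACKGROUND nothing
 * is done, above it the prepared nodes are flushed asynchronously, and once
 * BTRFS_DELAYED_WRITEBACK is reached the caller also waits up to HZ jiffies
 * (one second) for the backlog to drain below the background threshold.
 */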
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
		if (ret)
			return;

		wait_event_interruptible_timeout(
				delayed_root->wait,
				(atomic_read(&delayed_root->items) <
				 BTRFS_DELAYED_BACKGROUND),
				HZ);
		return;
	}

	btrfs_wq_run_delayed_node(delayed_root, root, 0);
}
/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(name: %s) into "
				"the insertion tree of the delayed node"
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
				"into the deletion tree of the delayed node"
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				(unsigned long long)index,
				(unsigned long long)node->root->objectid,
				(unsigned long long)node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	int ret = 0;

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check it is going to be freed
	 * or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}
*del_list
,
1669 struct btrfs_delayed_item
*curr
, *next
;
1672 if (list_empty(del_list
))
1675 list_for_each_entry_safe(curr
, next
, del_list
, readdir_list
) {
1676 if (curr
->key
.offset
> index
)
1679 list_del(&curr
->readdir_list
);
1680 ret
= (curr
->key
.offset
== index
);
1682 if (atomic_dec_and_test(&curr
->refs
))
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
				    filldir_t filldir,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < filp->f_pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		filp->f_pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = le16_to_cpu(di->name_len);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = filldir(dirent, name, name_len, curr->key.offset,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
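/*
 * The btrfs_stack_inode_* / btrfs_set_stack_inode_* accessors generated
 * above read and write the on-disk (little-endian) structure fields from an
 * in-memory copy; fill_stack_inode_item() below uses them to snapshot a VFS
 * inode into the delayed node's inode_item.
 */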
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}
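/*
 * Inverse of fill_stack_inode_item(): if the delayed node still holds a
 * dirty inode_item, copy it back into the VFS inode so a lookup sees the
 * pending update. Returns -ENOENT when there is nothing cached.
 */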
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	delayed_node->inode_dirty = 1;
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->inode_dirty) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}