/*
 * Copyright (C) 2011 Fujitsu.  All rights reserved.
 * Written by Miao Xie <miaox@cn.fujitsu.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/slab.h>
#include "delayed-inode.h"
#include "disk-io.h"
#include "transaction.h"

#define BTRFS_DELAYED_WRITEBACK		512
#define BTRFS_DELAYED_BACKGROUND	128
#define BTRFS_DELAYED_BATCH		16

static struct kmem_cache *delayed_node_cache;

int __init btrfs_delayed_inode_init(void)
{
	delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
					sizeof(struct btrfs_delayed_node),
					0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!delayed_node_cache)
		return -ENOMEM;
	return 0;
}

void btrfs_delayed_inode_exit(void)
{
	if (delayed_node_cache)
		kmem_cache_destroy(delayed_node_cache);
}

static inline void btrfs_init_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				struct btrfs_root *root, u64 inode_id)
{
	delayed_node->root = root;
	delayed_node->inode_id = inode_id;
	atomic_set(&delayed_node->refs, 0);
	delayed_node->count = 0;
	delayed_node->in_list = 0;
	delayed_node->inode_dirty = 0;
	delayed_node->ins_root = RB_ROOT;
	delayed_node->del_root = RB_ROOT;
	mutex_init(&delayed_node->mutex);
	delayed_node->index_cnt = 0;
	INIT_LIST_HEAD(&delayed_node->n_list);
	INIT_LIST_HEAD(&delayed_node->p_list);
	delayed_node->bytes_reserved = 0;
	memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
}
static inline int btrfs_is_continuous_delayed_item(
					struct btrfs_delayed_item *item1,
					struct btrfs_delayed_item *item2)
{
	if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
	    item1->key.objectid == item2->key.objectid &&
	    item1->key.type == item2->key.type &&
	    item1->key.offset + 1 == item2->key.offset)
		return 1;
	return 0;
}
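/*
 * Worked example (illustrative only): with a directory at objectid 257,
 *
 *	(257 DIR_INDEX 5) and (257 DIR_INDEX 6)	-> continuous
 *	(257 DIR_INDEX 5) and (257 DIR_INDEX 7)	-> not continuous (offset gap)
 *	(257 DIR_INDEX 5) and (258 DIR_INDEX 6)	-> not continuous (objectid)
 *
 * Only runs of continuous dir index items can be batched into a single leaf
 * by btrfs_batch_insert_items()/btrfs_batch_delete_items() below.
 */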
static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
							struct btrfs_root *root)
{
	return root->fs_info->delayed_root;
}

static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			atomic_inc(&node->refs);	/* can be accessed */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* can be accessed */
		atomic_inc(&node->refs);	/* cached in the inode */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}

/* Will return either the node or PTR_ERR(-ENOMEM) */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* can be accessed */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
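/*
 * Illustrative sketch (not compiled): the reference rule the two helpers
 * above implement.  A delayed node holds one reference for the pointer
 * cached in struct btrfs_inode plus one per active user, so a typical
 * caller pairs the get with a release:
 *
 *	struct btrfs_delayed_node *node;
 *
 *	node = btrfs_get_delayed_node(inode);
 *	if (node) {
 *		mutex_lock(&node->mutex);
 *		... use node ...
 *		mutex_unlock(&node->mutex);
 *		btrfs_release_delayed_node(node);  <- drops our reference only
 *	}
 *
 * The object is freed only after the cached reference is dropped as well
 * (see btrfs_remove_delayed_node()) and refs falls to zero.
 */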
/*
 * Call it when holding delayed_node->mutex.
 *
 * If mod = 1, add this node into the prepared list.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* inserted into list */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}

/* Call it when holding delayed_node->mutex */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* not in the list */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}

static struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* not in the list */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}

static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}

static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}

static struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}

static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}

static struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
{
	struct btrfs_delayed_item *item;
	item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
	if (item) {
		item->data_len = data_len;
		item->ins_or_del = 0;
		item->bytes_reserved = 0;
		item->delayed_node = NULL;
		atomic_set(&item->refs, 1);
	}
	return item;
}

/*
 * __btrfs_lookup_delayed_item - look up the delayed item by key
 * @delayed_node: pointer to the delayed node
 * @key:	  the key to look up
 * @prev:	  used to store the prev item if the right item isn't found
 * @next:	  used to store the next item if the right item isn't found
 *
 * Note: if we don't find the right item, we will return the prev item and
 * the next item.
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
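/*
 * Illustrative sketch (not compiled): how callers use the @prev/@next
 * out-parameters above.  With items at offsets 5 and 7 in the tree,
 * looking up offset 6 returns NULL but fills in both neighbours:
 *
 *	struct btrfs_delayed_item *prev, *next, *item;
 *
 *	item = __btrfs_lookup_delayed_item(&node->ins_root, &key,
 *					   &prev, &next);
 *	here item == NULL, prev->key.offset == 5, next->key.offset == 7.
 *
 * Passing NULL for @prev or @next skips that half of the search, which is
 * what the *_lookup_* and *_search_* wrappers below do.
 */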
static struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, NULL);
	return item;
}

static struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, NULL);
	return item;
}

static struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

static struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node,
					struct btrfs_key *key)
{
	struct btrfs_delayed_item *item, *next;

	item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
					   NULL, &next);
	if (!item)
		item = next;

	return item;
}

static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}

static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}

static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}

static void finish_one_item(struct btrfs_delayed_root *delayed_root)
{
	int seq = atomic_inc_return(&delayed_root->items_seq);
	if ((atomic_dec_return(&delayed_root->items) <
	    BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}
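/*
 * Worked example (illustrative): with BTRFS_DELAYED_BACKGROUND == 128 and
 * BTRFS_DELAYED_BATCH == 16, a waiter blocked in
 * btrfs_balance_delayed_items() is woken either when the item count drops
 * below 128 or on every 16th completed item (seq % 16 == 0), so progress
 * is reported in batches rather than once per item.
 */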
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	finish_one_item(delayed_root);
}

static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
{
	if (item) {
		__btrfs_remove_delayed_item(item);
		if (atomic_dec_and_test(&item->refs))
			kfree(item);
	}
}

static struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->ins_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
					struct btrfs_delayed_node *delayed_node)
{
	struct rb_node *p;
	struct btrfs_delayed_item *item = NULL;

	p = rb_first(&delayed_node->del_root);
	if (p)
		item = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return item;
}

static struct btrfs_delayed_item *__btrfs_next_delayed_item(
						struct btrfs_delayed_item *item)
{
	struct rb_node *p;
	struct btrfs_delayed_item *next = NULL;

	p = rb_next(&item->rb_node);
	if (p)
		next = rb_entry(p, struct btrfs_delayed_item, rb_node);

	return next;
}

static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
						   u64 root_id)
{
	struct btrfs_key root_key;

	if (root->objectid == root_id)
		return root;

	root_key.objectid = root_id;
	root_key.type = BTRFS_ROOT_ITEM_KEY;
	root_key.offset = (u64)-1;
	return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
}

static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}

static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);

	/*
	 * btrfs_dirty_inode will update the inode under btrfs_join_transaction
	 * which doesn't reserve space for speed.  This is a problem since we
	 * still need to reserve space for this update, so try to reserve the
	 * space.
	 *
	 * Now if src_rsv == delalloc_block_rsv we'll let it just steal since
	 * we're accounted for.
	 */
	if (!src_rsv || (!trans->bytes_reserved &&
			 src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		/*
		 * Since we're under a transaction reserve_metadata_bytes could
		 * try to commit the transaction which will make it return
		 * EAGAIN to make us stop the transaction we have, so return
		 * ENOSPC instead so that btrfs_dirty_inode knows what to do.
		 */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
				       &BTRFS_I(inode)->runtime_flags)) {
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/* Ok we didn't have space pre-reserved.  This shouldn't happen
		 * too often but it can happen if we do delalloc to an existing
		 * inode which gets dirtied because of the time update, and then
		 * isn't touched again until after the transaction commits and
		 * then we try to write out the data.  First try to be nice and
		 * reserve something strictly for us.  If not be a pain and try
		 * to steal from the delalloc block rsv.
		 */
		ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
					  BTRFS_RESERVE_NO_FLUSH);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/*
		 * Ok this is a problem, let's just steal from the global rsv
		 * since this really shouldn't happen that often.
		 */
		WARN_ON(1);
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	/*
	 * Migrate only takes a reservation, it doesn't touch the size of the
	 * block_rsv.  This is to simplify people who don't normally have
	 * things migrated from their block rsv.  If they go to release their
	 * reservation, that will decrease the size as well, so if migrate
	 * reduced size we'd end up with a negative size.  But for the
	 * delalloc_meta_reserved stuff we will only know to drop 1
	 * reservation, but we could in fact do this reserve/migrate dance
	 * several times between the time we did the original reservation and
	 * we'd clean it up.  So to take care of this, release the space for
	 * the meta reservation here.  I think it may be time for a
	 * documentation page on how block rsvs work.
	 */
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}
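/*
 * Illustrative sketch (an assumption-laden summary, not compiled): the
 * reserve/migrate/release dance above in block_rsv terms.  Per the comment
 * above, a reserve grows both the size and the reserved bytes of an rsv, a
 * migrate only moves reserved bytes, and a release shrinks the size as
 * well, which is why the delalloc case must release the source rsv at the
 * end:
 *
 *	btrfs_block_rsv_add(root, dst, N, ...)    grows dst by N
 *	btrfs_block_rsv_migrate(src, dst, N)      moves N of reserved only
 *	btrfs_block_rsv_release(root, src, N)     shrinks src by N
 *
 * Here N is btrfs_calc_trans_metadata_size(root, 1), the metadata space
 * needed to update one inode item.
 */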
static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}

/*
 * This helper will insert some continuous items into the same leaf according
 * to the free space of the leaf.
 */
static int btrfs_batch_insert_items(struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * count the number of continuous items that we can insert in batch
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	/*
	 * we need to allocate some memory space, but it might cause the task
	 * to sleep, so we set all locked nodes in the path to blocking locks
	 * first.
	 */
	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* get keys of all the delayed items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* reset all the locked nodes in the path to spinning locks. */
	btrfs_clear_path_blocking(path, NULL, 0);

	/* insert the keys of the items */
	setup_items_for_insert(root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* insert the dir index items */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
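/*
 * Worked example (illustrative): besides its data, every item inserted
 * into a leaf consumes sizeof(struct btrfs_item) (25 bytes on disk) for
 * its header.  With 200 bytes free in the leaf and continuous delayed
 * items carrying 40-byte dir items, the loop above batches
 * 200 / (40 + 25) = 3 of them and leaves the rest for another leaf.
 */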
/*
 * This helper can just do simple insertion that needn't extend the item for
 * new data, such as directory name index insertion, inode insertion.
 */
static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path,
				     struct btrfs_delayed_item *delayed_item)
{
	struct extent_buffer *leaf;
	char *ptr;
	int ret;

	ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
				      delayed_item->data_len);
	if (ret < 0 && ret != -EEXIST)
		return ret;

	leaf = path->nodes[0];

	ptr = btrfs_item_ptr(leaf, path->slots[0], char);

	write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
			    delayed_item->data_len);
	btrfs_mark_buffer_dirty(leaf);

	btrfs_delayed_item_release_metadata(root, delayed_item);
	return 0;
}

/*
 * we insert an item first, then if there are some continuous items, we try
 * to insert those items into the same leaf.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}

static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* FIXME: Is errno suitable? */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * count the number of the dir index items that we can delete in batch
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * can't find the item which the node points to, so this node
		 * is invalid, just drop it.
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr) {
			mutex_unlock(&node->mutex);
			goto do_again;
		} else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}

static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		finish_one_item(delayed_root);
	}
}

static int __btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;

	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		btrfs_release_path(path);
		return -ENOENT;
	} else if (ret < 0) {
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);

	return 0;
}

static inline int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
					     struct btrfs_root *root,
					     struct btrfs_path *path,
					     struct btrfs_delayed_node *node)
{
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	ret = __btrfs_update_delayed_inode(trans, root, path, node);
	mutex_unlock(&node->mutex);
	return ret;
}

static inline int
__btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				   struct btrfs_path *path,
				   struct btrfs_delayed_node *node)
{
	int ret;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (ret)
		return ret;

	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	return ret;
}
/*
 * Called when committing the transaction.
 * Returns 0 on success.
 * Returns < 0 on error and returns with an aborted transaction with any
 * outstanding delayed items cleaned up.
 */
static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;
	bool count = (nr > 0);

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node && (!count || (count && nr--))) {
		ret = __btrfs_commit_inode_delayed_items(trans, path,
							 curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			curr_node = NULL;
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	if (curr_node)
		btrfs_release_delayed_node(curr_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	return __btrfs_run_delayed_items(trans, root, -1);
}

int btrfs_run_delayed_items_nr(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, int nr)
{
	return __btrfs_run_delayed_items(trans, root, nr);
}

int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	path = btrfs_alloc_path();
	if (!path) {
		btrfs_release_delayed_node(delayed_node);
		return -ENOMEM;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	ret = __btrfs_commit_inode_delayed_items(trans, path, delayed_node);

	btrfs_release_delayed_node(delayed_node);
	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}

int btrfs_commit_inode_delayed_inode(struct inode *inode)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	trans = btrfs_join_transaction(delayed_node->root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto trans_out;
	}
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &delayed_node->root->fs_info->delayed_block_rsv;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty)
		ret = __btrfs_update_delayed_inode(trans, delayed_node->root,
						   path, delayed_node);
	else
		ret = 0;
	mutex_unlock(&delayed_node->mutex);

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;
trans_out:
	btrfs_end_transaction(trans, delayed_node->root);
	btrfs_btree_balance_dirty(delayed_node->root);
out:
	btrfs_release_delayed_node(delayed_node);

	return ret;
}
void btrfs_remove_delayed_node(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
	if (!delayed_node)
		return;

	BTRFS_I(inode)->delayed_node = NULL;
	btrfs_release_delayed_node(delayed_node);
}

struct btrfs_async_delayed_work {
	struct btrfs_delayed_root *delayed_root;
	int nr;
	struct btrfs_work work;
};

static void btrfs_async_run_delayed_root(struct btrfs_work *work)
{
	struct btrfs_async_delayed_work *async_work;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	int total_done = 0;

	async_work = container_of(work, struct btrfs_async_delayed_work, work);
	delayed_root = async_work->delayed_root;

	path = btrfs_alloc_path();
	if (!path)
		goto out;

again:
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND / 2)
		goto free_path;

	delayed_node = btrfs_first_prepared_delayed_node(delayed_root);
	if (!delayed_node)
		goto free_path;

	path->leave_spinning = 1;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto release_path;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	__btrfs_commit_inode_delayed_items(trans, path, delayed_node);
	/*
	 * Maybe new delayed items have been inserted, so we need to requeue
	 * the work. Besides that, we must dequeue the empty delayed nodes
	 * to avoid the race between delayed items balance and the worker.
	 * The race goes like this:
	 *	Task1				Worker thread
	 *					count == 0, needn't requeue
	 *					  also needn't insert the
	 *					  delayed node into prepare
	 *					  list again.
	 *	add lots of delayed items
	 *	queue the delayed node
	 *	  already in the list,
	 *	  and not in the prepare
	 *	  list, it means the delayed
	 *	  node is being dealt with
	 *	  by the worker.
	 *	do delayed items balance
	 *	  the delayed node is being
	 *	  dealt with by the worker
	 *	  now, just wait.
	 *	the worker goes idle.
	 * Task1 will sleep until the transaction is committed.
	 */
	mutex_lock(&delayed_node->mutex);
	btrfs_dequeue_delayed_node(root->fs_info->delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	btrfs_btree_balance_dirty_nodelay(root);

release_path:
	btrfs_release_path(path);
	total_done++;

	btrfs_release_prepared_delayed_node(delayed_node);
	if (async_work->nr == 0 || total_done < async_work->nr)
		goto again;

free_path:
	btrfs_free_path(path);
out:
	wake_up(&delayed_root->wait);
	kfree(async_work);
}

static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
				     struct btrfs_root *root, int nr)
{
	struct btrfs_async_delayed_work *async_work;

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return 0;

	async_work = kmalloc(sizeof(*async_work), GFP_NOFS);
	if (!async_work)
		return -ENOMEM;

	async_work->delayed_root = delayed_root;
	async_work->work.func = btrfs_async_run_delayed_root;
	async_work->work.flags = 0;
	async_work->nr = nr;

	btrfs_queue_worker(&root->fs_info->delayed_workers, &async_work->work);
	return 0;
}

void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	delayed_root = btrfs_get_delayed_root(root);
	WARN_ON(btrfs_first_delayed_node(delayed_root));
}

static int refs_newer(struct btrfs_delayed_root *delayed_root,
		      int seq, int count)
{
	int val = atomic_read(&delayed_root->items_seq);

	if (val < seq || val >= seq + count)
		return 1;
	return 0;
}
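/*
 * Worked example (illustrative): refs_newer() treats [seq, seq + count) as
 * the "no progress yet" window.  If a waiter sampled seq == 100 with
 * count == BTRFS_DELAYED_BATCH (16), items_seq values 100..115 keep it
 * waiting, while anything outside that window (a wrap below 100, or 116
 * and up) means at least one batch finished and the waiter may recheck.
 */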
void btrfs_balance_delayed_items(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	int seq;

	delayed_root = btrfs_get_delayed_root(root);

	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
		return;

	seq = atomic_read(&delayed_root->items_seq);

	if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
		int ret;
		DEFINE_WAIT(__wait);

		ret = btrfs_wq_run_delayed_node(delayed_root, root, 0);
		if (ret)
			return;

		while (1) {
			prepare_to_wait(&delayed_root->wait, &__wait,
					TASK_INTERRUPTIBLE);

			if (refs_newer(delayed_root, seq,
				       BTRFS_DELAYED_BATCH) ||
			    atomic_read(&delayed_root->items) <
			    BTRFS_DELAYED_BACKGROUND) {
				break;
			}
			if (!signal_pending(current))
				schedule();
			else
				break;
		}
		finish_wait(&delayed_root->wait, &__wait);
	}

	btrfs_wq_run_delayed_node(delayed_root, root, BTRFS_DELAYED_BATCH);
}

/* Will return 0 or -ENOMEM */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(name: %s) "
				"into the insertion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
					       struct btrfs_delayed_node *node,
					       struct btrfs_key *key)
{
	struct btrfs_delayed_item *item;

	mutex_lock(&node->mutex);
	item = __btrfs_lookup_delayed_insertion_item(node, key);
	if (!item) {
		mutex_unlock(&node->mutex);
		return 1;
	}

	btrfs_delayed_item_release_metadata(root, item);
	btrfs_release_delayed_item(item);
	mutex_unlock(&node->mutex);
	return 0;
}

int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * we have reserved enough space when we start a new transaction,
	 * so reserving metadata failure is impossible.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
				"into the deletion tree of the delayed node "
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				(unsigned long long)index,
				(unsigned long long)node->root->objectid,
				(unsigned long long)node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}

int btrfs_inode_delayed_dir_index_count(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);

	if (!delayed_node)
		return -ENOENT;

	/*
	 * Since we have held i_mutex of this directory, it is impossible that
	 * a new directory index is added into the delayed node and index_cnt
	 * is updated now. So we needn't lock the delayed node.
	 */
	if (!delayed_node->index_cnt) {
		btrfs_release_delayed_node(delayed_node);
		return -EINVAL;
	}

	BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
	btrfs_release_delayed_node(delayed_node);
	return 0;
}

void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);
	/*
	 * This delayed node is still cached in the btrfs inode, so refs
	 * must be > 1 now, and we needn't check whether it is going to be
	 * freed or not.
	 *
	 * Besides that, this function is used to read dir, we do not
	 * insert/delete delayed items in this period. So we also needn't
	 * requeue or dequeue this delayed node.
	 */
	atomic_dec(&delayed_node->refs);
}

void btrfs_put_delayed_items(struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_item *curr, *next;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		list_del(&curr->readdir_list);
		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);
	}
}

int btrfs_should_delete_dir_index(struct list_head *del_list,
				  u64 index)
{
	struct btrfs_delayed_item *curr, *next;
	int ret;

	if (list_empty(del_list))
		return 0;

	list_for_each_entry_safe(curr, next, del_list, readdir_list) {
		if (curr->key.offset > index)
			break;

		list_del(&curr->readdir_list);
		ret = (curr->key.offset == index);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (ret)
			return 1;
		else
			continue;
	}
	return 0;
}
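/*
 * Illustrative sketch (not compiled, simplified from how inode.c consumes
 * these helpers during readdir):
 *
 *	LIST_HEAD(ins_list);
 *	LIST_HEAD(del_list);
 *
 *	btrfs_get_delayed_items(inode, &ins_list, &del_list);
 *	for each dir index item found while walking the b-tree:
 *		if (btrfs_should_delete_dir_index(&del_list, index))
 *			skip it, a delayed deletion item shadows it;
 *	btrfs_readdir_delayed_dir_index(filp, dirent, filldir, &ins_list);
 *	btrfs_put_delayed_items(&ins_list, &del_list);
 */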
/*
 * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree
 *
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
				    filldir_t filldir,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	/*
	 * Changing the data of the delayed item is impossible. So
	 * we needn't lock them. And we have held i_mutex of the
	 * directory, nobody can delete any directory indexes now.
	 */
	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		if (curr->key.offset < filp->f_pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		filp->f_pos = curr->key.offset;

		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = le16_to_cpu(di->name_len);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = filldir(dirent, name, name_len, curr->key.offset,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}

BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, i_uid_read(inode));
	btrfs_set_stack_inode_gid(inode_item, i_gid_read(inode));
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}

int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	i_uid_write(inode, btrfs_stack_inode_uid(inode_item));
	i_gid_write(inode, btrfs_stack_inode_gid(inode_item));
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	inode->i_version = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;
	int ret = 0;

	delayed_node = btrfs_get_or_create_delayed_node(inode);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->inode_dirty) {
		fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
		goto release_node;
	}

	ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
						   delayed_node);
	if (ret)
		goto release_node;

	fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
	delayed_node->inode_dirty = 1;
	delayed_node->count++;
	atomic_inc(&root->fs_info->delayed_root->items);
release_node:
	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}

static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_root *root = delayed_node->root;
	struct btrfs_delayed_item *curr_item, *prev_item;

	mutex_lock(&delayed_node->mutex);
	curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (curr_item) {
		btrfs_delayed_item_release_metadata(root, curr_item);
		prev_item = curr_item;
		curr_item = __btrfs_next_delayed_item(prev_item);
		btrfs_release_delayed_item(prev_item);
	}

	if (delayed_node->inode_dirty) {
		btrfs_delayed_inode_release_metadata(root, delayed_node);
		btrfs_release_delayed_inode(delayed_node);
	}
	mutex_unlock(&delayed_node->mutex);
}

void btrfs_kill_delayed_inode_items(struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	__btrfs_kill_delayed_node(delayed_node);
	btrfs_release_delayed_node(delayed_node);
}

void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}

void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
{
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		__btrfs_kill_delayed_node(curr_node);

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}
}