/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
20 #include <linux/sched.h>
21 #include <linux/writeback.h>
22 #include <linux/pagemap.h>
25 #include "transaction.h"
/* Count of live btrfs_transaction structs; used only for sanity WARN_ONs. */
static int total_trans = 0;

/* Slab caches for transaction handles/structs, defined elsewhere. */
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;

/* Workqueue that runs the periodic transaction cleaner. */
static struct workqueue_struct *trans_wq;

/* Radix-tree tags on fs_roots_radix: root was touched by the running
 * transaction / root needs a defrag pass.
 */
#define BTRFS_ROOT_TRANS_TAG 0
#define BTRFS_ROOT_DEFRAG_TAG 1
37 static noinline
void put_transaction(struct btrfs_transaction
*transaction
)
39 WARN_ON(transaction
->use_count
== 0);
40 transaction
->use_count
--;
41 if (transaction
->use_count
== 0) {
42 WARN_ON(total_trans
== 0);
44 list_del_init(&transaction
->list
);
45 memset(transaction
, 0, sizeof(*transaction
));
46 kmem_cache_free(btrfs_transaction_cachep
, transaction
);
50 static noinline
int join_transaction(struct btrfs_root
*root
)
52 struct btrfs_transaction
*cur_trans
;
53 cur_trans
= root
->fs_info
->running_transaction
;
55 cur_trans
= kmem_cache_alloc(btrfs_transaction_cachep
,
59 root
->fs_info
->generation
++;
60 root
->fs_info
->last_alloc
= 0;
61 root
->fs_info
->last_data_alloc
= 0;
62 cur_trans
->num_writers
= 1;
63 cur_trans
->num_joined
= 0;
64 cur_trans
->transid
= root
->fs_info
->generation
;
65 init_waitqueue_head(&cur_trans
->writer_wait
);
66 init_waitqueue_head(&cur_trans
->commit_wait
);
67 cur_trans
->in_commit
= 0;
68 cur_trans
->use_count
= 1;
69 cur_trans
->commit_done
= 0;
70 cur_trans
->start_time
= get_seconds();
71 INIT_LIST_HEAD(&cur_trans
->pending_snapshots
);
72 list_add_tail(&cur_trans
->list
, &root
->fs_info
->trans_list
);
73 btrfs_ordered_inode_tree_init(&cur_trans
->ordered_inode_tree
);
74 extent_io_tree_init(&cur_trans
->dirty_pages
,
75 root
->fs_info
->btree_inode
->i_mapping
,
77 spin_lock(&root
->fs_info
->new_trans_lock
);
78 root
->fs_info
->running_transaction
= cur_trans
;
79 spin_unlock(&root
->fs_info
->new_trans_lock
);
81 cur_trans
->num_writers
++;
82 cur_trans
->num_joined
++;
88 static noinline
int record_root_in_trans(struct btrfs_root
*root
)
90 u64 running_trans_id
= root
->fs_info
->running_transaction
->transid
;
91 if (root
->ref_cows
&& root
->last_trans
< running_trans_id
) {
92 WARN_ON(root
== root
->fs_info
->extent_root
);
93 if (root
->root_item
.refs
!= 0) {
94 radix_tree_tag_set(&root
->fs_info
->fs_roots_radix
,
95 (unsigned long)root
->root_key
.objectid
,
96 BTRFS_ROOT_TRANS_TAG
);
97 radix_tree_tag_set(&root
->fs_info
->fs_roots_radix
,
98 (unsigned long)root
->root_key
.objectid
,
99 BTRFS_ROOT_DEFRAG_TAG
);
100 root
->commit_root
= btrfs_root_node(root
);
104 root
->last_trans
= running_trans_id
;
109 struct btrfs_trans_handle
*btrfs_start_transaction(struct btrfs_root
*root
,
112 struct btrfs_trans_handle
*h
=
113 kmem_cache_alloc(btrfs_trans_handle_cachep
, GFP_NOFS
);
116 mutex_lock(&root
->fs_info
->trans_mutex
);
117 ret
= join_transaction(root
);
120 record_root_in_trans(root
);
121 h
->transid
= root
->fs_info
->running_transaction
->transid
;
122 h
->transaction
= root
->fs_info
->running_transaction
;
123 h
->blocks_reserved
= num_blocks
;
125 h
->block_group
= NULL
;
126 h
->alloc_exclude_nr
= 0;
127 h
->alloc_exclude_start
= 0;
128 root
->fs_info
->running_transaction
->use_count
++;
129 mutex_unlock(&root
->fs_info
->trans_mutex
);
133 int btrfs_end_transaction(struct btrfs_trans_handle
*trans
,
134 struct btrfs_root
*root
)
136 struct btrfs_transaction
*cur_trans
;
138 mutex_lock(&root
->fs_info
->trans_mutex
);
139 cur_trans
= root
->fs_info
->running_transaction
;
140 WARN_ON(cur_trans
!= trans
->transaction
);
141 WARN_ON(cur_trans
->num_writers
< 1);
142 cur_trans
->num_writers
--;
143 if (waitqueue_active(&cur_trans
->writer_wait
))
144 wake_up(&cur_trans
->writer_wait
);
145 put_transaction(cur_trans
);
146 mutex_unlock(&root
->fs_info
->trans_mutex
);
147 memset(trans
, 0, sizeof(*trans
));
148 kmem_cache_free(btrfs_trans_handle_cachep
, trans
);
153 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle
*trans
,
154 struct btrfs_root
*root
)
159 struct extent_io_tree
*dirty_pages
;
161 struct inode
*btree_inode
= root
->fs_info
->btree_inode
;
166 if (!trans
|| !trans
->transaction
) {
167 return filemap_write_and_wait(btree_inode
->i_mapping
);
169 dirty_pages
= &trans
->transaction
->dirty_pages
;
171 ret
= find_first_extent_bit(dirty_pages
, 0, &start
, &end
,
175 clear_extent_dirty(dirty_pages
, start
, end
, GFP_NOFS
);
176 while(start
<= end
) {
177 index
= start
>> PAGE_CACHE_SHIFT
;
178 start
= (u64
)(index
+ 1) << PAGE_CACHE_SHIFT
;
179 page
= find_lock_page(btree_inode
->i_mapping
, index
);
182 if (PageWriteback(page
)) {
184 wait_on_page_writeback(page
);
187 page_cache_release(page
);
191 err
= write_one_page(page
, 0);
194 page_cache_release(page
);
197 err
= filemap_fdatawait(btree_inode
->i_mapping
);
203 static int update_cowonly_root(struct btrfs_trans_handle
*trans
,
204 struct btrfs_root
*root
)
208 struct btrfs_root
*tree_root
= root
->fs_info
->tree_root
;
210 btrfs_write_dirty_block_groups(trans
, root
);
212 old_root_bytenr
= btrfs_root_bytenr(&root
->root_item
);
213 if (old_root_bytenr
== root
->node
->start
)
215 btrfs_set_root_bytenr(&root
->root_item
,
217 btrfs_set_root_level(&root
->root_item
,
218 btrfs_header_level(root
->node
));
219 ret
= btrfs_update_root(trans
, tree_root
,
223 btrfs_write_dirty_block_groups(trans
, root
);
228 int btrfs_commit_tree_roots(struct btrfs_trans_handle
*trans
,
229 struct btrfs_root
*root
)
231 struct btrfs_fs_info
*fs_info
= root
->fs_info
;
232 struct list_head
*next
;
234 while(!list_empty(&fs_info
->dirty_cowonly_roots
)) {
235 next
= fs_info
->dirty_cowonly_roots
.next
;
237 root
= list_entry(next
, struct btrfs_root
, dirty_list
);
238 update_cowonly_root(trans
, root
);
243 static noinline
int wait_for_commit(struct btrfs_root
*root
,
244 struct btrfs_transaction
*commit
)
247 mutex_lock(&root
->fs_info
->trans_mutex
);
248 while(!commit
->commit_done
) {
249 prepare_to_wait(&commit
->commit_wait
, &wait
,
250 TASK_UNINTERRUPTIBLE
);
251 if (commit
->commit_done
)
253 mutex_unlock(&root
->fs_info
->trans_mutex
);
255 mutex_lock(&root
->fs_info
->trans_mutex
);
257 mutex_unlock(&root
->fs_info
->trans_mutex
);
258 finish_wait(&commit
->commit_wait
, &wait
);
263 struct list_head list
;
264 struct btrfs_root
*root
;
265 struct btrfs_root
*latest_root
;
268 int btrfs_add_dead_root(struct btrfs_root
*root
,
269 struct btrfs_root
*latest
,
270 struct list_head
*dead_list
)
272 struct dirty_root
*dirty
;
274 dirty
= kmalloc(sizeof(*dirty
), GFP_NOFS
);
278 dirty
->latest_root
= latest
;
279 list_add(&dirty
->list
, dead_list
);
283 static noinline
int add_dirty_roots(struct btrfs_trans_handle
*trans
,
284 struct radix_tree_root
*radix
,
285 struct list_head
*list
)
287 struct dirty_root
*dirty
;
288 struct btrfs_root
*gang
[8];
289 struct btrfs_root
*root
;
296 ret
= radix_tree_gang_lookup_tag(radix
, (void **)gang
, 0,
298 BTRFS_ROOT_TRANS_TAG
);
301 for (i
= 0; i
< ret
; i
++) {
303 radix_tree_tag_clear(radix
,
304 (unsigned long)root
->root_key
.objectid
,
305 BTRFS_ROOT_TRANS_TAG
);
306 if (root
->commit_root
== root
->node
) {
307 WARN_ON(root
->node
->start
!=
308 btrfs_root_bytenr(&root
->root_item
));
309 free_extent_buffer(root
->commit_root
);
310 root
->commit_root
= NULL
;
312 /* make sure to update the root on disk
313 * so we get any updates to the block used
316 err
= btrfs_update_root(trans
,
317 root
->fs_info
->tree_root
,
322 dirty
= kmalloc(sizeof(*dirty
), GFP_NOFS
);
324 dirty
->root
= kmalloc(sizeof(*dirty
->root
), GFP_NOFS
);
325 BUG_ON(!dirty
->root
);
327 memset(&root
->root_item
.drop_progress
, 0,
328 sizeof(struct btrfs_disk_key
));
329 root
->root_item
.drop_level
= 0;
331 memcpy(dirty
->root
, root
, sizeof(*root
));
332 dirty
->root
->node
= root
->commit_root
;
333 dirty
->latest_root
= root
;
334 root
->commit_root
= NULL
;
336 root
->root_key
.offset
= root
->fs_info
->generation
;
337 btrfs_set_root_bytenr(&root
->root_item
,
339 btrfs_set_root_level(&root
->root_item
,
340 btrfs_header_level(root
->node
));
341 err
= btrfs_insert_root(trans
, root
->fs_info
->tree_root
,
347 refs
= btrfs_root_refs(&dirty
->root
->root_item
);
348 btrfs_set_root_refs(&dirty
->root
->root_item
, refs
- 1);
349 err
= btrfs_update_root(trans
, root
->fs_info
->tree_root
,
350 &dirty
->root
->root_key
,
351 &dirty
->root
->root_item
);
355 list_add(&dirty
->list
, list
);
366 int btrfs_defrag_root(struct btrfs_root
*root
, int cacheonly
)
368 struct btrfs_fs_info
*info
= root
->fs_info
;
370 struct btrfs_trans_handle
*trans
;
374 if (root
->defrag_running
)
376 trans
= btrfs_start_transaction(root
, 1);
378 root
->defrag_running
= 1;
379 ret
= btrfs_defrag_leaves(trans
, root
, cacheonly
);
380 nr
= trans
->blocks_used
;
381 btrfs_end_transaction(trans
, root
);
382 btrfs_btree_balance_dirty(info
->tree_root
, nr
);
385 trans
= btrfs_start_transaction(root
, 1);
389 root
->defrag_running
= 0;
391 radix_tree_tag_clear(&info
->fs_roots_radix
,
392 (unsigned long)root
->root_key
.objectid
,
393 BTRFS_ROOT_DEFRAG_TAG
);
394 btrfs_end_transaction(trans
, root
);
398 int btrfs_defrag_dirty_roots(struct btrfs_fs_info
*info
)
400 struct btrfs_root
*gang
[1];
401 struct btrfs_root
*root
;
408 ret
= radix_tree_gang_lookup_tag(&info
->fs_roots_radix
,
411 BTRFS_ROOT_DEFRAG_TAG
);
414 for (i
= 0; i
< ret
; i
++) {
416 last
= root
->root_key
.objectid
+ 1;
417 btrfs_defrag_root(root
, 1);
420 btrfs_defrag_root(info
->extent_root
, 1);
424 static noinline
int drop_dirty_roots(struct btrfs_root
*tree_root
,
425 struct list_head
*list
)
427 struct dirty_root
*dirty
;
428 struct btrfs_trans_handle
*trans
;
435 while(!list_empty(list
)) {
436 struct btrfs_root
*root
;
438 dirty
= list_entry(list
->next
, struct dirty_root
, list
);
439 list_del_init(&dirty
->list
);
441 num_bytes
= btrfs_root_used(&dirty
->root
->root_item
);
442 root
= dirty
->latest_root
;
443 atomic_inc(&root
->fs_info
->throttles
);
445 mutex_lock(&root
->fs_info
->drop_mutex
);
447 trans
= btrfs_start_transaction(tree_root
, 1);
448 ret
= btrfs_drop_snapshot(trans
, dirty
->root
);
449 if (ret
!= -EAGAIN
) {
453 err
= btrfs_update_root(trans
,
455 &dirty
->root
->root_key
,
456 &dirty
->root
->root_item
);
459 nr
= trans
->blocks_used
;
460 ret
= btrfs_end_transaction(trans
, tree_root
);
463 mutex_unlock(&root
->fs_info
->drop_mutex
);
464 btrfs_btree_balance_dirty(tree_root
, nr
);
466 mutex_lock(&root
->fs_info
->drop_mutex
);
469 atomic_dec(&root
->fs_info
->throttles
);
471 mutex_lock(&root
->fs_info
->alloc_mutex
);
472 num_bytes
-= btrfs_root_used(&dirty
->root
->root_item
);
473 bytes_used
= btrfs_root_used(&root
->root_item
);
475 record_root_in_trans(root
);
476 btrfs_set_root_used(&root
->root_item
,
477 bytes_used
- num_bytes
);
479 mutex_unlock(&root
->fs_info
->alloc_mutex
);
481 ret
= btrfs_del_root(trans
, tree_root
, &dirty
->root
->root_key
);
486 mutex_unlock(&root
->fs_info
->drop_mutex
);
488 nr
= trans
->blocks_used
;
489 ret
= btrfs_end_transaction(trans
, tree_root
);
492 free_extent_buffer(dirty
->root
->node
);
496 btrfs_btree_balance_dirty(tree_root
, nr
);
502 int btrfs_write_ordered_inodes(struct btrfs_trans_handle
*trans
,
503 struct btrfs_root
*root
)
505 struct btrfs_transaction
*cur_trans
= trans
->transaction
;
507 u64 root_objectid
= 0;
511 atomic_inc(&root
->fs_info
->throttles
);
513 ret
= btrfs_find_first_ordered_inode(
514 &cur_trans
->ordered_inode_tree
,
515 &root_objectid
, &objectid
, &inode
);
519 mutex_unlock(&root
->fs_info
->trans_mutex
);
521 if (S_ISREG(inode
->i_mode
)) {
522 atomic_inc(&BTRFS_I(inode
)->ordered_writeback
);
523 filemap_fdatawrite(inode
->i_mapping
);
524 atomic_dec(&BTRFS_I(inode
)->ordered_writeback
);
528 mutex_lock(&root
->fs_info
->trans_mutex
);
533 ret
= btrfs_find_del_first_ordered_inode(
534 &cur_trans
->ordered_inode_tree
,
535 &root_objectid
, &objectid
, &inode
);
538 mutex_unlock(&root
->fs_info
->trans_mutex
);
540 if (S_ISREG(inode
->i_mode
)) {
541 atomic_inc(&BTRFS_I(inode
)->ordered_writeback
);
542 filemap_write_and_wait(inode
->i_mapping
);
543 atomic_dec(&BTRFS_I(inode
)->ordered_writeback
);
545 atomic_dec(&inode
->i_count
);
548 mutex_lock(&root
->fs_info
->trans_mutex
);
550 atomic_dec(&root
->fs_info
->throttles
);
554 static noinline
int create_pending_snapshot(struct btrfs_trans_handle
*trans
,
555 struct btrfs_fs_info
*fs_info
,
556 struct btrfs_pending_snapshot
*pending
)
558 struct btrfs_key key
;
559 struct btrfs_root_item
*new_root_item
;
560 struct btrfs_root
*tree_root
= fs_info
->tree_root
;
561 struct btrfs_root
*root
= pending
->root
;
562 struct extent_buffer
*tmp
;
563 struct extent_buffer
*old
;
568 new_root_item
= kmalloc(sizeof(*new_root_item
), GFP_NOFS
);
569 if (!new_root_item
) {
573 ret
= btrfs_find_free_objectid(trans
, tree_root
, 0, &objectid
);
577 memcpy(new_root_item
, &root
->root_item
, sizeof(*new_root_item
));
579 key
.objectid
= objectid
;
581 btrfs_set_key_type(&key
, BTRFS_ROOT_ITEM_KEY
);
583 old
= btrfs_lock_root_node(root
);
584 btrfs_cow_block(trans
, root
, old
, NULL
, 0, &old
);
586 btrfs_copy_root(trans
, root
, old
, &tmp
, objectid
);
587 btrfs_tree_unlock(old
);
588 free_extent_buffer(old
);
590 btrfs_set_root_bytenr(new_root_item
, tmp
->start
);
591 btrfs_set_root_level(new_root_item
, btrfs_header_level(tmp
));
592 ret
= btrfs_insert_root(trans
, root
->fs_info
->tree_root
, &key
,
594 btrfs_tree_unlock(tmp
);
595 free_extent_buffer(tmp
);
600 * insert the directory item
602 key
.offset
= (u64
)-1;
603 namelen
= strlen(pending
->name
);
604 ret
= btrfs_insert_dir_item(trans
, root
->fs_info
->tree_root
,
605 pending
->name
, namelen
,
606 root
->fs_info
->sb
->s_root
->d_inode
->i_ino
,
612 ret
= btrfs_insert_inode_ref(trans
, root
->fs_info
->tree_root
,
613 pending
->name
, strlen(pending
->name
), objectid
,
614 root
->fs_info
->sb
->s_root
->d_inode
->i_ino
);
616 /* Invalidate existing dcache entry for new snapshot. */
617 btrfs_invalidate_dcache_root(root
, pending
->name
, namelen
);
620 kfree(new_root_item
);
624 static noinline
int create_pending_snapshots(struct btrfs_trans_handle
*trans
,
625 struct btrfs_fs_info
*fs_info
)
627 struct btrfs_pending_snapshot
*pending
;
628 struct list_head
*head
= &trans
->transaction
->pending_snapshots
;
631 while(!list_empty(head
)) {
632 pending
= list_entry(head
->next
,
633 struct btrfs_pending_snapshot
, list
);
634 ret
= create_pending_snapshot(trans
, fs_info
, pending
);
636 list_del(&pending
->list
);
637 kfree(pending
->name
);
643 int btrfs_commit_transaction(struct btrfs_trans_handle
*trans
,
644 struct btrfs_root
*root
)
646 unsigned long joined
= 0;
647 unsigned long timeout
= 1;
648 struct btrfs_transaction
*cur_trans
;
649 struct btrfs_transaction
*prev_trans
= NULL
;
650 struct btrfs_root
*chunk_root
= root
->fs_info
->chunk_root
;
651 struct list_head dirty_fs_roots
;
652 struct extent_io_tree
*pinned_copy
;
656 INIT_LIST_HEAD(&dirty_fs_roots
);
658 mutex_lock(&root
->fs_info
->trans_mutex
);
659 if (trans
->transaction
->in_commit
) {
660 cur_trans
= trans
->transaction
;
661 trans
->transaction
->use_count
++;
662 mutex_unlock(&root
->fs_info
->trans_mutex
);
663 btrfs_end_transaction(trans
, root
);
665 ret
= wait_for_commit(root
, cur_trans
);
668 mutex_lock(&root
->fs_info
->trans_mutex
);
669 put_transaction(cur_trans
);
670 mutex_unlock(&root
->fs_info
->trans_mutex
);
675 pinned_copy
= kmalloc(sizeof(*pinned_copy
), GFP_NOFS
);
679 extent_io_tree_init(pinned_copy
,
680 root
->fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
682 trans
->transaction
->in_commit
= 1;
683 cur_trans
= trans
->transaction
;
684 if (cur_trans
->list
.prev
!= &root
->fs_info
->trans_list
) {
685 prev_trans
= list_entry(cur_trans
->list
.prev
,
686 struct btrfs_transaction
, list
);
687 if (!prev_trans
->commit_done
) {
688 prev_trans
->use_count
++;
689 mutex_unlock(&root
->fs_info
->trans_mutex
);
691 wait_for_commit(root
, prev_trans
);
693 mutex_lock(&root
->fs_info
->trans_mutex
);
694 put_transaction(prev_trans
);
699 joined
= cur_trans
->num_joined
;
700 WARN_ON(cur_trans
!= trans
->transaction
);
701 prepare_to_wait(&cur_trans
->writer_wait
, &wait
,
702 TASK_UNINTERRUPTIBLE
);
704 if (cur_trans
->num_writers
> 1)
705 timeout
= MAX_SCHEDULE_TIMEOUT
;
709 mutex_unlock(&root
->fs_info
->trans_mutex
);
711 schedule_timeout(timeout
);
713 mutex_lock(&root
->fs_info
->trans_mutex
);
714 finish_wait(&cur_trans
->writer_wait
, &wait
);
715 ret
= btrfs_write_ordered_inodes(trans
, root
);
717 } while (cur_trans
->num_writers
> 1 ||
718 (cur_trans
->num_joined
!= joined
));
720 ret
= create_pending_snapshots(trans
, root
->fs_info
);
723 WARN_ON(cur_trans
!= trans
->transaction
);
725 ret
= add_dirty_roots(trans
, &root
->fs_info
->fs_roots_radix
,
729 ret
= btrfs_commit_tree_roots(trans
, root
);
732 cur_trans
= root
->fs_info
->running_transaction
;
733 spin_lock(&root
->fs_info
->new_trans_lock
);
734 root
->fs_info
->running_transaction
= NULL
;
735 spin_unlock(&root
->fs_info
->new_trans_lock
);
736 btrfs_set_super_generation(&root
->fs_info
->super_copy
,
738 btrfs_set_super_root(&root
->fs_info
->super_copy
,
739 root
->fs_info
->tree_root
->node
->start
);
740 btrfs_set_super_root_level(&root
->fs_info
->super_copy
,
741 btrfs_header_level(root
->fs_info
->tree_root
->node
));
743 btrfs_set_super_chunk_root(&root
->fs_info
->super_copy
,
744 chunk_root
->node
->start
);
745 btrfs_set_super_chunk_root_level(&root
->fs_info
->super_copy
,
746 btrfs_header_level(chunk_root
->node
));
747 memcpy(&root
->fs_info
->super_for_commit
, &root
->fs_info
->super_copy
,
748 sizeof(root
->fs_info
->super_copy
));
750 btrfs_copy_pinned(root
, pinned_copy
);
752 mutex_unlock(&root
->fs_info
->trans_mutex
);
753 ret
= btrfs_write_and_wait_transaction(trans
, root
);
755 write_ctree_super(trans
, root
);
757 btrfs_finish_extent_commit(trans
, root
, pinned_copy
);
758 mutex_lock(&root
->fs_info
->trans_mutex
);
762 cur_trans
->commit_done
= 1;
763 root
->fs_info
->last_trans_committed
= cur_trans
->transid
;
764 wake_up(&cur_trans
->commit_wait
);
765 put_transaction(cur_trans
);
766 put_transaction(cur_trans
);
768 if (root
->fs_info
->closing
)
769 list_splice_init(&root
->fs_info
->dead_roots
, &dirty_fs_roots
);
771 list_splice_init(&dirty_fs_roots
, &root
->fs_info
->dead_roots
);
773 mutex_unlock(&root
->fs_info
->trans_mutex
);
774 kmem_cache_free(btrfs_trans_handle_cachep
, trans
);
776 if (root
->fs_info
->closing
) {
777 drop_dirty_roots(root
->fs_info
->tree_root
, &dirty_fs_roots
);
782 int btrfs_clean_old_snapshots(struct btrfs_root
*root
)
784 struct list_head dirty_roots
;
785 INIT_LIST_HEAD(&dirty_roots
);
787 mutex_lock(&root
->fs_info
->trans_mutex
);
788 list_splice_init(&root
->fs_info
->dead_roots
, &dirty_roots
);
789 mutex_unlock(&root
->fs_info
->trans_mutex
);
791 if (!list_empty(&dirty_roots
)) {
792 drop_dirty_roots(root
, &dirty_roots
);
796 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
797 void btrfs_transaction_cleaner(void *p
)
799 void btrfs_transaction_cleaner(struct work_struct
*work
)
802 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
803 struct btrfs_fs_info
*fs_info
= p
;
805 struct btrfs_fs_info
*fs_info
= container_of(work
,
806 struct btrfs_fs_info
,
810 struct btrfs_root
*root
= fs_info
->tree_root
;
811 struct btrfs_transaction
*cur
;
812 struct btrfs_trans_handle
*trans
;
814 unsigned long delay
= HZ
* 30;
818 if (root
->fs_info
->closing
)
821 mutex_lock(&root
->fs_info
->trans_mutex
);
822 cur
= root
->fs_info
->running_transaction
;
824 mutex_unlock(&root
->fs_info
->trans_mutex
);
828 if (now
< cur
->start_time
|| now
- cur
->start_time
< 30) {
829 mutex_unlock(&root
->fs_info
->trans_mutex
);
833 mutex_unlock(&root
->fs_info
->trans_mutex
);
834 btrfs_defrag_dirty_roots(root
->fs_info
);
835 trans
= btrfs_start_transaction(root
, 1);
836 ret
= btrfs_commit_transaction(trans
, root
);
838 btrfs_clean_old_snapshots(root
);
839 btrfs_transaction_queue_work(root
, delay
);
842 void btrfs_transaction_queue_work(struct btrfs_root
*root
, int delay
)
844 if (!root
->fs_info
->closing
)
845 queue_delayed_work(trans_wq
, &root
->fs_info
->trans_work
, delay
);
848 void btrfs_transaction_flush_work(struct btrfs_root
*root
)
850 cancel_delayed_work(&root
->fs_info
->trans_work
);
851 flush_workqueue(trans_wq
);
854 void __init
btrfs_init_transaction_sys(void)
856 trans_wq
= create_workqueue("btrfs-transaction");
859 void btrfs_exit_transaction_sys(void)
861 destroy_workqueue(trans_wq
);