Btrfs: Fix nodatacow for the new data=ordered mode
[deliverable/linux.git] / fs / btrfs / transaction.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/fs.h>
20 #include <linux/sched.h>
21 #include <linux/writeback.h>
22 #include <linux/pagemap.h>
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "transaction.h"
26 #include "locking.h"
27 #include "ref-cache.h"
28
29 static int total_trans = 0;
30 extern struct kmem_cache *btrfs_trans_handle_cachep;
31 extern struct kmem_cache *btrfs_transaction_cachep;
32
33 #define BTRFS_ROOT_TRANS_TAG 0
34
35 static noinline void put_transaction(struct btrfs_transaction *transaction)
36 {
37 WARN_ON(transaction->use_count == 0);
38 transaction->use_count--;
39 if (transaction->use_count == 0) {
40 WARN_ON(total_trans == 0);
41 total_trans--;
42 list_del_init(&transaction->list);
43 memset(transaction, 0, sizeof(*transaction));
44 kmem_cache_free(btrfs_transaction_cachep, transaction);
45 }
46 }
47
/*
 * Join the currently running transaction, or allocate and initialize a
 * brand new one if there is none, installing it as
 * fs_info->running_transaction.
 *
 * Caller must hold fs_info->trans_mutex.  Allocation failure is fatal
 * (BUG_ON), consistent with the rest of this file.  Always returns 0.
 */
static noinline int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;
	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		total_trans++;
		BUG_ON(!cur_trans);
		root->fs_info->generation++;
		/* reset per-transaction allocation hints */
		root->fs_info->last_alloc = 0;
		root->fs_info->last_data_alloc = 0;
		cur_trans->num_writers = 1;
		cur_trans->num_joined = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->blocked = 0;
		/* one reference held by the running_transaction pointer */
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();
		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		extent_io_tree_init(&cur_trans->dirty_pages,
				    root->fs_info->btree_inode->i_mapping,
				    GFP_NOFS);
		/*
		 * new_trans_lock lets readers sample running_transaction
		 * without taking trans_mutex.
		 */
		spin_lock(&root->fs_info->new_trans_lock);
		root->fs_info->running_transaction = cur_trans;
		spin_unlock(&root->fs_info->new_trans_lock);
	} else {
		/* piggy-back on the transaction already in flight */
		cur_trans->num_writers++;
		cur_trans->num_joined++;
	}

	return 0;
}
85
/*
 * Record that @root is modified in the running transaction.
 *
 * For a COW-able root seen for the first time in this transaction
 * (last_trans older than the running transid), tag it in
 * fs_roots_radix and build a btrfs_dirty_root that snapshots the
 * current commit root so the old tree can be dropped after the commit.
 * Caller must hold fs_info->trans_mutex (it serializes last_trans and
 * the radix tag).  Always returns 0; allocation failure is fatal.
 */
static noinline int record_root_in_trans(struct btrfs_root *root)
{
	struct btrfs_dirty_root *dirty;
	u64 running_trans_id = root->fs_info->running_transaction->transid;
	if (root->ref_cows && root->last_trans < running_trans_id) {
		/* the extent root is never ref_cows; catch misuse */
		WARN_ON(root == root->fs_info->extent_root);
		if (root->root_item.refs != 0) {
			radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);

			dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
			BUG_ON(!dirty);
			dirty->root = kmalloc(sizeof(*dirty->root), GFP_NOFS);
			BUG_ON(!dirty->root);
			dirty->latest_root = root;
			INIT_LIST_HEAD(&dirty->list);

			root->commit_root = btrfs_root_node(root);

			/*
			 * dirty->root is a shallow copy of *root that keeps
			 * pointing at the old (commit) tree; re-init the
			 * locks so the copy has independent lock state.
			 */
			memcpy(dirty->root, root, sizeof(*root));
			spin_lock_init(&dirty->root->node_lock);
			spin_lock_init(&dirty->root->list_lock);
			mutex_init(&dirty->root->objectid_mutex);
			INIT_LIST_HEAD(&dirty->root->dead_list);
			dirty->root->node = root->commit_root;
			dirty->root->commit_root = NULL;

			spin_lock(&root->list_lock);
			list_add(&dirty->root->dead_list, &root->dead_list);
			spin_unlock(&root->list_lock);

			root->dirty_root = dirty;
		} else {
			/* refs == 0 should not happen for a live root */
			WARN_ON(1);
		}
		root->last_trans = running_trans_id;
	}
	return 0;
}
126
/*
 * If the running transaction is blocked (a commit is in its critical
 * section), sleep until it unblocks.  Takes a temporary reference on
 * the transaction so it cannot be freed while we drop trans_mutex to
 * schedule.
 *
 * Caller must hold fs_info->trans_mutex; it is dropped and re-taken
 * around the sleep.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		cur_trans->use_count++;
		while(1) {
			/* classic prepare_to_wait/recheck/schedule pattern */
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (cur_trans->blocked) {
				mutex_unlock(&root->fs_info->trans_mutex);
				schedule();
				mutex_lock(&root->fs_info->trans_mutex);
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
			} else {
				finish_wait(&root->fs_info->transaction_wait,
					    &wait);
				break;
			}
		}
		put_transaction(cur_trans);
	}
}
153
154 struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
155 int num_blocks, int wait)
156 {
157 struct btrfs_trans_handle *h =
158 kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
159 int ret;
160
161 mutex_lock(&root->fs_info->trans_mutex);
162 if ((wait == 1 && !root->fs_info->open_ioctl_trans) || wait == 2)
163 wait_current_trans(root);
164 ret = join_transaction(root);
165 BUG_ON(ret);
166
167 record_root_in_trans(root);
168 h->transid = root->fs_info->running_transaction->transid;
169 h->transaction = root->fs_info->running_transaction;
170 h->blocks_reserved = num_blocks;
171 h->blocks_used = 0;
172 h->block_group = NULL;
173 h->alloc_exclude_nr = 0;
174 h->alloc_exclude_start = 0;
175 root->fs_info->running_transaction->use_count++;
176 mutex_unlock(&root->fs_info->trans_mutex);
177 return h;
178 }
179
/*
 * Start a transaction, waiting for a blocked one unless a userspace
 * ioctl transaction is open (wait mode 1).
 */
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_blocks)
{
	return start_transaction(root, num_blocks, 1);
}
/*
 * Join the running transaction without ever waiting for a blocked one
 * (wait mode 0) -- used by paths that must not stall a commit.
 */
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, num_blocks, 0);
}
190
/*
 * Start a transaction on behalf of a userspace ioctl; always waits for
 * a blocked transaction (wait mode 2).
 */
struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
							 int num_blocks)
{
	return start_transaction(r, num_blocks, 2);
}
196
197
/*
 * Sleep until @commit has commit_done set, dropping trans_mutex while
 * scheduling.  Takes and releases fs_info->trans_mutex internally, so
 * the caller must NOT hold it.  Always returns 0.
 */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while(!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		/* recheck after queuing so we cannot miss the wakeup */
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}
216
/*
 * Throttle writers while snapshot drops are in flight.
 *
 * Sleeps on transaction_throttle until either the throttle count goes
 * to zero or throttle_gen advances (meaning a drop made progress).
 * Repeats ("harder") a bounded number of times depending on how much
 * memory the leaf-ref cache is holding, to apply back-pressure when
 * the cache is large.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	int harder_count = 0;

harder:
	if (atomic_read(&info->throttles)) {
		DEFINE_WAIT(wait);
		int thr;
		thr = atomic_read(&info->throttle_gen);

		do {
			prepare_to_wait(&info->transaction_throttle,
					&wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&info->throttles)) {
				finish_wait(&info->transaction_throttle, &wait);
				break;
			}
			schedule();
			finish_wait(&info->transaction_throttle, &wait);
		} while (thr == atomic_read(&info->throttle_gen));
		harder_count++;

		/* escalate: the bigger the ref cache, the more rounds */
		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
		    harder_count < 2)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
		    harder_count < 10)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
		    harder_count < 20)
			goto harder;
	}
}
253
/*
 * Public throttle point: wait for a blocked transaction (unless an
 * ioctl transaction is open) and then throttle against snapshot drops.
 * throttle_on_drops() is called without trans_mutex held.
 */
void btrfs_throttle(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->open_ioctl_trans)
		wait_current_trans(root);
	mutex_unlock(&root->fs_info->trans_mutex);

	throttle_on_drops(root);
}
263
/*
 * Finish using a transaction handle: drop our writer count and our
 * use_count reference, wake anyone waiting for writers to drain
 * (the committer), free the handle, and optionally throttle against
 * pending snapshot drops.  Always returns 0.
 */
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *info = root->fs_info;

	mutex_lock(&info->trans_mutex);
	cur_trans = info->running_transaction;
	WARN_ON(cur_trans != trans->transaction);
	WARN_ON(cur_trans->num_writers < 1);
	cur_trans->num_writers--;

	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);
	mutex_unlock(&info->trans_mutex);
	/* poison the handle so stale users fault quickly */
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		throttle_on_drops(root);

	return 0;
}
288
/*
 * End a transaction handle without throttling on snapshot drops.
 */
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}
294
/*
 * End a transaction handle and throttle against pending snapshot drops.
 */
int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}
300
301
/*
 * Write out and wait on all btree pages dirtied by this transaction.
 *
 * Walks the transaction's dirty_pages extent tree, clears each dirty
 * range, then writes every page in the range via write_one_page().
 * Pages already under writeback and no longer dirty are skipped.
 * Finally waits for all outstanding btree writeback.
 *
 * With no transaction (NULL handle), just flushes the whole btree
 * inode.  Returns the first write/wait error seen, else 0.
 */
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	int ret;
	int err;
	int werr = 0;
	struct extent_io_tree *dirty_pages;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start;
	u64 end;
	unsigned long index;

	if (!trans || !trans->transaction) {
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	dirty_pages = &trans->transaction->dirty_pages;
	while(1) {
		ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
		while(start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			/* advance start before any continue below */
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_lock_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					/*
					 * already being written and not
					 * re-dirtied: nothing to do
					 */
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			/* write_one_page unlocks the page for us */
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	err = filemap_fdatawait(btree_inode->i_mapping);
	if (err)
		werr = err;
	return werr;
}
351
/*
 * Push a COW-only root's new position into the tree root.
 *
 * Writing dirty block groups may itself COW the root node again, so
 * loop until the root item's bytenr matches the current node --
 * a fixed-point iteration.  Always returns 0; update failure is fatal.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	btrfs_write_dirty_block_groups(trans, root);
	while(1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start)
			break;
		btrfs_set_root_bytenr(&root->root_item,
				      root->node->start);
		btrfs_set_root_level(&root->root_item,
				     btrfs_header_level(root->node));
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);
		btrfs_write_dirty_block_groups(trans, root);
	}
	return 0;
}
376
377 int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
378 struct btrfs_root *root)
379 {
380 struct btrfs_fs_info *fs_info = root->fs_info;
381 struct list_head *next;
382
383 while(!list_empty(&fs_info->dirty_cowonly_roots)) {
384 next = fs_info->dirty_cowonly_roots.next;
385 list_del_init(next);
386 root = list_entry(next, struct btrfs_root, dirty_list);
387 update_cowonly_root(trans, root);
388 }
389 return 0;
390 }
391
392 int btrfs_add_dead_root(struct btrfs_root *root, struct btrfs_root *latest)
393 {
394 struct btrfs_dirty_root *dirty;
395
396 dirty = kmalloc(sizeof(*dirty), GFP_NOFS);
397 if (!dirty)
398 return -ENOMEM;
399 dirty->root = root;
400 dirty->latest_root = latest;
401
402 mutex_lock(&root->fs_info->trans_mutex);
403 list_add(&dirty->list, &latest->fs_info->dead_roots);
404 mutex_unlock(&root->fs_info->trans_mutex);
405 return 0;
406 }
407
/*
 * Walk all fs roots tagged dirty in this transaction and make their
 * new positions permanent.
 *
 * For each tagged root:
 *  - if the tree was not actually COWed (commit_root == node), just
 *    refresh the root item on disk and free the bookkeeping;
 *  - otherwise insert a NEW root item at offset == current generation
 *    (the old item stays behind, describing the snapshot of the prior
 *    tree), decrement the old item's refs, and if refs hit 1 put the
 *    dirty record on @list so drop_dirty_roots() can reap the old tree.
 *
 * Caller context: run during commit; trans_mutex serialization is
 * provided by the committer.  Returns the first insert error, else 0.
 */
static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
				    struct radix_tree_root *radix,
				    struct list_head *list)
{
	struct btrfs_dirty_root *dirty;
	struct btrfs_root *gang[8];
	struct btrfs_root *root;
	int i;
	int ret;
	int err = 0;
	u32 refs;

	while(1) {
		ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(radix,
				     (unsigned long)root->root_key.objectid,
				     BTRFS_ROOT_TRANS_TAG);

			BUG_ON(!root->ref_tree);
			dirty = root->dirty_root;

			if (root->commit_root == root->node) {
				/* nothing changed in this tree */
				WARN_ON(root->node->start !=
					btrfs_root_bytenr(&root->root_item));

				free_extent_buffer(root->commit_root);
				root->commit_root = NULL;
				root->dirty_root = NULL;

				spin_lock(&root->list_lock);
				list_del_init(&dirty->root->dead_list);
				spin_unlock(&root->list_lock);

				kfree(dirty->root);
				kfree(dirty);

				/* make sure to update the root on disk
				 * so we get any updates to the block used
				 * counts
				 */
				err = btrfs_update_root(trans,
						root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
				continue;
			}

			/* reset drop-progress for the fresh root item */
			memset(&root->root_item.drop_progress, 0,
			       sizeof(struct btrfs_disk_key));
			root->root_item.drop_level = 0;
			root->commit_root = NULL;
			root->dirty_root = NULL;
			/* key offset == generation distinguishes versions */
			root->root_key.offset = root->fs_info->generation;
			btrfs_set_root_bytenr(&root->root_item,
					      root->node->start);
			btrfs_set_root_level(&root->root_item,
					     btrfs_header_level(root->node));
			err = btrfs_insert_root(trans, root->fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;

			refs = btrfs_root_refs(&dirty->root->root_item);
			btrfs_set_root_refs(&dirty->root->root_item, refs - 1);
			err = btrfs_update_root(trans, root->fs_info->tree_root,
						&dirty->root->root_key,
						&dirty->root->root_item);

			BUG_ON(err);
			if (refs == 1) {
				/* last reference: schedule the old tree
				 * for dropping */
				list_add(&dirty->list, list);
			} else {
				WARN_ON(1);
				free_extent_buffer(dirty->root->node);
				kfree(dirty->root);
				kfree(dirty);
			}
		}
	}
	return err;
}
496
/*
 * Defragment the btree of @root, restarting the transaction between
 * passes so a single defrag cannot pin one transaction open forever.
 *
 * btrfs_defrag_leaves() returns -EAGAIN when it wants to be called
 * again; we loop until it finishes or the filesystem is closing.
 * defrag_running guards against concurrent defrag of the same root
 * (NOTE(review): the smp_mb()/flag pair looks like a best-effort
 * exclusion, not a hard lock -- two racing callers could both pass the
 * check; confirm callers serialize externally).  Always returns 0.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;

	smp_mb();
	if (root->defrag_running)
		return 0;
	trans = btrfs_start_transaction(root, 1);
	while (1) {
		root->defrag_running = 1;
		ret = btrfs_defrag_leaves(trans, root, cacheonly);
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		/* let the btree writeback catch up between passes */
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		trans = btrfs_start_transaction(root, 1);
		if (root->fs_info->closing || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	smp_mb();
	btrfs_end_transaction(trans, root);
	return 0;
}
525
/*
 * Reap the dead (snapshotted-away) roots queued on @list.
 *
 * For each dirty root: repeatedly run btrfs_drop_snapshot() in its own
 * transaction until it finishes (-EAGAIN means "call me again"),
 * credit the freed bytes back to the latest version of the root,
 * delete the old root item, trim now-useless leaf refs, and free the
 * bookkeeping.  Throttle counters bracket the drop so writers can
 * back off (see throttle_on_drops()).
 *
 * Returns the last btrfs_del_root/btrfs_end_transaction status; most
 * failures are fatal (BUG_ON).
 */
static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
				     struct list_head *list)
{
	struct btrfs_dirty_root *dirty;
	struct btrfs_trans_handle *trans;
	unsigned long nr;
	u64 num_bytes;
	u64 bytes_used;
	u64 max_useless;
	int ret = 0;
	int err;

	while(!list_empty(list)) {
		struct btrfs_root *root;

		dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
		list_del_init(&dirty->list);

		num_bytes = btrfs_root_used(&dirty->root->root_item);
		root = dirty->latest_root;
		atomic_inc(&root->fs_info->throttles);

		mutex_lock(&root->fs_info->drop_mutex);
		while(1) {
			trans = btrfs_start_transaction(tree_root, 1);
			ret = btrfs_drop_snapshot(trans, dirty->root);
			if (ret != -EAGAIN) {
				/* done (or hard error); trans stays open
				 * for the del_root below */
				break;
			}

			/* persist drop progress so a crash can resume */
			err = btrfs_update_root(trans,
					tree_root,
					&dirty->root->root_key,
					&dirty->root->root_item);
			if (err)
				ret = err;
			nr = trans->blocks_used;
			ret = btrfs_end_transaction(trans, tree_root);
			BUG_ON(ret);

			/* drop the mutex so writers can make progress */
			mutex_unlock(&root->fs_info->drop_mutex);
			btrfs_btree_balance_dirty(tree_root, nr);
			cond_resched();
			mutex_lock(&root->fs_info->drop_mutex);
		}
		BUG_ON(ret);
		atomic_dec(&root->fs_info->throttles);
		wake_up(&root->fs_info->transaction_throttle);

		/* give the space freed by the drop back to the live root */
		mutex_lock(&root->fs_info->alloc_mutex);
		num_bytes -= btrfs_root_used(&dirty->root->root_item);
		bytes_used = btrfs_root_used(&root->root_item);
		if (num_bytes) {
			record_root_in_trans(root);
			btrfs_set_root_used(&root->root_item,
					    bytes_used - num_bytes);
		}
		mutex_unlock(&root->fs_info->alloc_mutex);

		ret = btrfs_del_root(trans, tree_root, &dirty->root->root_key);
		if (ret) {
			BUG();
			break;
		}
		mutex_unlock(&root->fs_info->drop_mutex);

		/*
		 * leaf refs older than the oldest remaining dead root are
		 * no longer needed; compute that cut-off
		 */
		spin_lock(&root->list_lock);
		list_del_init(&dirty->root->dead_list);
		if (!list_empty(&root->dead_list)) {
			struct btrfs_root *oldest;
			oldest = list_entry(root->dead_list.prev,
					    struct btrfs_root, dead_list);
			max_useless = oldest->root_key.offset - 1;
		} else {
			max_useless = root->root_key.offset - 1;
		}
		spin_unlock(&root->list_lock);

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		ret = btrfs_remove_leaf_refs(root, max_useless);
		BUG_ON(ret);

		free_extent_buffer(dirty->root->node);
		kfree(dirty->root);
		kfree(dirty);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	return ret;
}
620
/*
 * Materialize one pending snapshot inside the commit.
 *
 * COWs the source root's node, copies it to a fresh tree with a new
 * objectid, inserts a root item for it (offset 1), then links it into
 * the default directory with a dir item + inode ref named
 * pending->name, and invalidates any stale dcache entry.
 *
 * NOTE(review): the return values of btrfs_cow_block() and
 * btrfs_copy_root() are not checked here -- confirm both are
 * must-succeed in this context (they BUG internally) or add handling.
 *
 * Returns 0 on success or a negative errno.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	int namelen;
	u64 objectid;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = -ENOMEM;
		goto fail;
	}
	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret)
		goto fail;

	/* the snapshot starts as an exact copy of the source root item */
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

	key.objectid = objectid;
	key.offset = 1;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old, 0);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_bytenr(new_root_item, tmp->start);
	btrfs_set_root_level(new_root_item, btrfs_header_level(tmp));
	ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
				new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret)
		goto fail;

	/*
	 * insert the directory item
	 */
	key.offset = (u64)-1;
	namelen = strlen(pending->name);
	ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
				    pending->name, namelen,
				    root->fs_info->sb->s_root->d_inode->i_ino,
				    &key, BTRFS_FT_DIR, 0);

	if (ret)
		goto fail;

	ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
			     pending->name, strlen(pending->name), objectid,
			     root->fs_info->sb->s_root->d_inode->i_ino, 0);

	/* Invalidate existing dcache entry for new snapshot. */
	btrfs_invalidate_dcache_root(root, pending->name, namelen);

fail:
	kfree(new_root_item);
	return ret;
}
690
691 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
692 struct btrfs_fs_info *fs_info)
693 {
694 struct btrfs_pending_snapshot *pending;
695 struct list_head *head = &trans->transaction->pending_snapshots;
696 int ret;
697
698 while(!list_empty(head)) {
699 pending = list_entry(head->next,
700 struct btrfs_pending_snapshot, list);
701 ret = create_pending_snapshot(trans, fs_info, pending);
702 BUG_ON(ret);
703 list_del(&pending->list);
704 kfree(pending->name);
705 kfree(pending);
706 }
707 return 0;
708 }
709
710 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
711 struct btrfs_root *root)
712 {
713 unsigned long joined = 0;
714 unsigned long timeout = 1;
715 struct btrfs_transaction *cur_trans;
716 struct btrfs_transaction *prev_trans = NULL;
717 struct btrfs_root *chunk_root = root->fs_info->chunk_root;
718 struct list_head dirty_fs_roots;
719 struct extent_io_tree *pinned_copy;
720 DEFINE_WAIT(wait);
721 int ret;
722
723 INIT_LIST_HEAD(&dirty_fs_roots);
724
725 mutex_lock(&root->fs_info->trans_mutex);
726 if (trans->transaction->in_commit) {
727 cur_trans = trans->transaction;
728 trans->transaction->use_count++;
729 mutex_unlock(&root->fs_info->trans_mutex);
730 btrfs_end_transaction(trans, root);
731
732 ret = wait_for_commit(root, cur_trans);
733 BUG_ON(ret);
734
735 mutex_lock(&root->fs_info->trans_mutex);
736 put_transaction(cur_trans);
737 mutex_unlock(&root->fs_info->trans_mutex);
738
739 return 0;
740 }
741
742 pinned_copy = kmalloc(sizeof(*pinned_copy), GFP_NOFS);
743 if (!pinned_copy)
744 return -ENOMEM;
745
746 extent_io_tree_init(pinned_copy,
747 root->fs_info->btree_inode->i_mapping, GFP_NOFS);
748
749 trans->transaction->in_commit = 1;
750 trans->transaction->blocked = 1;
751 cur_trans = trans->transaction;
752 if (cur_trans->list.prev != &root->fs_info->trans_list) {
753 prev_trans = list_entry(cur_trans->list.prev,
754 struct btrfs_transaction, list);
755 if (!prev_trans->commit_done) {
756 prev_trans->use_count++;
757 mutex_unlock(&root->fs_info->trans_mutex);
758
759 wait_for_commit(root, prev_trans);
760
761 mutex_lock(&root->fs_info->trans_mutex);
762 put_transaction(prev_trans);
763 }
764 }
765
766 do {
767 int snap_pending = 0;
768 joined = cur_trans->num_joined;
769 if (!list_empty(&trans->transaction->pending_snapshots))
770 snap_pending = 1;
771
772 WARN_ON(cur_trans != trans->transaction);
773 prepare_to_wait(&cur_trans->writer_wait, &wait,
774 TASK_UNINTERRUPTIBLE);
775
776 if (cur_trans->num_writers > 1)
777 timeout = MAX_SCHEDULE_TIMEOUT;
778 else
779 timeout = 1;
780
781 mutex_unlock(&root->fs_info->trans_mutex);
782
783 if (snap_pending) {
784 ret = btrfs_wait_ordered_extents(root, 1);
785 BUG_ON(ret);
786 }
787
788 schedule_timeout(timeout);
789
790 mutex_lock(&root->fs_info->trans_mutex);
791 finish_wait(&cur_trans->writer_wait, &wait);
792 } while (cur_trans->num_writers > 1 ||
793 (cur_trans->num_joined != joined));
794
795 ret = create_pending_snapshots(trans, root->fs_info);
796 BUG_ON(ret);
797
798 WARN_ON(cur_trans != trans->transaction);
799
800 ret = add_dirty_roots(trans, &root->fs_info->fs_roots_radix,
801 &dirty_fs_roots);
802 BUG_ON(ret);
803
804 ret = btrfs_commit_tree_roots(trans, root);
805 BUG_ON(ret);
806
807 cur_trans = root->fs_info->running_transaction;
808 spin_lock(&root->fs_info->new_trans_lock);
809 root->fs_info->running_transaction = NULL;
810 spin_unlock(&root->fs_info->new_trans_lock);
811 btrfs_set_super_generation(&root->fs_info->super_copy,
812 cur_trans->transid);
813 btrfs_set_super_root(&root->fs_info->super_copy,
814 root->fs_info->tree_root->node->start);
815 btrfs_set_super_root_level(&root->fs_info->super_copy,
816 btrfs_header_level(root->fs_info->tree_root->node));
817
818 btrfs_set_super_chunk_root(&root->fs_info->super_copy,
819 chunk_root->node->start);
820 btrfs_set_super_chunk_root_level(&root->fs_info->super_copy,
821 btrfs_header_level(chunk_root->node));
822 memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
823 sizeof(root->fs_info->super_copy));
824
825 btrfs_copy_pinned(root, pinned_copy);
826
827 trans->transaction->blocked = 0;
828 wake_up(&root->fs_info->transaction_throttle);
829 wake_up(&root->fs_info->transaction_wait);
830
831 mutex_unlock(&root->fs_info->trans_mutex);
832 ret = btrfs_write_and_wait_transaction(trans, root);
833 BUG_ON(ret);
834 write_ctree_super(trans, root);
835
836 btrfs_finish_extent_commit(trans, root, pinned_copy);
837 mutex_lock(&root->fs_info->trans_mutex);
838
839 kfree(pinned_copy);
840
841 cur_trans->commit_done = 1;
842 root->fs_info->last_trans_committed = cur_trans->transid;
843 wake_up(&cur_trans->commit_wait);
844 put_transaction(cur_trans);
845 put_transaction(cur_trans);
846
847 list_splice_init(&dirty_fs_roots, &root->fs_info->dead_roots);
848 if (root->fs_info->closing)
849 list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
850
851 mutex_unlock(&root->fs_info->trans_mutex);
852 kmem_cache_free(btrfs_trans_handle_cachep, trans);
853
854 if (root->fs_info->closing) {
855 drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
856 }
857 return ret;
858 }
859
860 int btrfs_clean_old_snapshots(struct btrfs_root *root)
861 {
862 struct list_head dirty_roots;
863 INIT_LIST_HEAD(&dirty_roots);
864 again:
865 mutex_lock(&root->fs_info->trans_mutex);
866 list_splice_init(&root->fs_info->dead_roots, &dirty_roots);
867 mutex_unlock(&root->fs_info->trans_mutex);
868
869 if (!list_empty(&dirty_roots)) {
870 drop_dirty_roots(root, &dirty_roots);
871 goto again;
872 }
873 return 0;
874 }
This page took 0.063126 seconds and 5 git commands to generate.