Btrfs: fix uncompleted transaction
fs/btrfs/transaction.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"

#define BTRFS_ROOT_TRANS_TAG 0

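/*
 * Note on lifetime: a transaction normally holds two references (see
 * join_transaction()), one for the handle that opened it and one that
 * is dropped when the commit finishes.  The final put frees the
 * structure; by then it must be off fs_info->trans_list and its
 * delayed-ref tree must be empty, which the checks below enforce.
 */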
void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

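/*
 * Once a transaction has entered commit, only TRANS_JOIN and
 * TRANS_JOIN_NOLOCK handles may still hop into it; every other type
 * has to wait for the next transaction.
 */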
static inline int can_join_transaction(struct btrfs_transaction *trans,
				       int type)
{
	return !(trans->in_commit &&
		 type != TRANS_JOIN &&
		 type != TRANS_JOIN_NOLOCK);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int type)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	if (fs_info->trans_no_join) {
		/*
		 * If we are JOIN_NOLOCK we're already committing a current
		 * transaction, we just need a handle to deal with something
		 * when committing the transaction, such as inode cache and
		 * space cache. It is a special case.
		 */
		if (type != TRANS_JOIN_NOLOCK) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		if (!can_join_transaction(cur_trans, type)) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	/*
	 * If we are ATTACH, we just want to catch the current transaction,
	 * and commit it. If there is no transaction, just return ENOENT.
	 */
	if (type == TRANS_ATTACH)
		return -ENOENT;

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked.  Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = fs_info->running_transaction;
		goto loop;
	} else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
		spin_unlock(&fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list))
		WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
		WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
	atomic_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	INIT_LIST_HEAD(&cur_trans->ordered_operations);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky.  We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c.  The solution used here is to flag ourselves
		 * with root->in_trans_setup.  When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock.  smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

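/*
 * Decide whether a caller should wait for a blocked transaction before
 * joining: never during log recovery, always for userspace
 * transactions, and for TRANS_START only when no ioctl-opened
 * transaction is in flight.
 */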
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}
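
/*
 * Common helper behind all the btrfs_*_transaction() wrappers below.
 * The @type ordering matters: anything below TRANS_JOIN_NOLOCK takes
 * intwrite freeze protection on the super block, and TRANS_ATTACH
 * never creates a new transaction, it only latches onto a running one.
 */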
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, int type,
		  enum btrfs_reserve_flush_enum flush)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;
	u64 qgroup_reserved = 0;

	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		WARN_ON(h->use_count > 2);
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		if (root->fs_info->quota_enabled &&
		    is_fstree(root->root_key.objectid)) {
			qgroup_reserved = num_items * root->leafsize;
			ret = btrfs_qgroup_reserve(root, qgroup_reserved);
			if (ret)
				return ERR_PTR(ret);
		}

		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes, flush);
		if (ret)
			goto reserve_fail;
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h) {
		ret = -ENOMEM;
		goto alloc_fail;
	}

	/*
	 * If we are JOIN_NOLOCK we're already committing a transaction and
	 * waiting on this guy, so we don't need to do the sb_start_intwrite
	 * because we're already holding a ref.  We need this because we could
	 * have raced in and done an fsync() on a file which can kick a commit
	 * and then we deadlock with somebody doing a freeze.
	 *
	 * If we are ATTACH, it means we just want to catch the current
	 * transaction and commit it, so we needn't do sb_start_intwrite().
	 */
	if (type < TRANS_JOIN_NOLOCK)
		sb_start_intwrite(root->fs_info->sb);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type);
		if (ret == -EBUSY) {
			wait_current_trans(root);
			if (unlikely(type == TRANS_ATTACH))
				ret = -ENOENT;
		}
	} while (ret == -EBUSY);

	if (ret < 0) {
		/* We must get the transaction if we are JOIN_NOLOCK. */
		BUG_ON(type == TRANS_JOIN_NOLOCK);
		goto join_fail;
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->adding_csums = 0;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->qgroup_reserved = 0;
	h->delayed_ref_elem.seq = 0;
	h->type = type;
	h->allocating_chunk = false;
	INIT_LIST_HEAD(&h->qgroup_ref_list);
	INIT_LIST_HEAD(&h->new_bgs);

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}
	h->qgroup_reserved = qgroup_reserved;

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;

join_fail:
	if (type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);
	kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
	if (num_bytes)
		btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
					num_bytes);
reserve_fail:
	if (qgroup_reserved)
		btrfs_qgroup_free(root, qgroup_reserved);
	return ERR_PTR(ret);
}
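
/*
 * Typical caller pattern for the wrappers below (an illustrative
 * sketch, not code from this file):
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify the trees under the metadata reservation ...
 *	return btrfs_end_transaction(trans, root);
 */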
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
					struct btrfs_root *root, int num_items)
{
	return start_transaction(root, num_items, TRANS_START,
				 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction.  But it is possible that the inactive transaction
 * is still in memory, not fully on disk.  If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke
 *	btrfs_attach_transaction_barrier()
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will wait for all the inactive transactions until they fully
 * complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;

	trans = start_transaction(root, 0, TRANS_ATTACH, 0);
	if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
		btrfs_wait_for_commit(root, 0);

	return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

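/*
 * Wait for the transaction with the given transid to commit.  A
 * transid of zero means "the newest transaction that is already
 * committing"; if nothing is committing or committed, this returns
 * without waiting.
 */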
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret = 0;

	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		ret = -EINVAL;
		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				ret = 0;
				break;
			}
			if (t->transid > transid) {
				ret = 0;
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		/* The specified transaction doesn't exist */
		if (!cur_trans)
			goto out;
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);
	put_transaction(cur_trans);
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	return should_end_transaction(trans, root);
}

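/*
 * Drop a transaction handle: give back the metadata reservation, run a
 * small batch of delayed refs, and, when throttling and the
 * transaction has been marked blocked, either commit it ourselves or
 * poke the transaction kthread to do so.
 */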
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int lock = (trans->type != TRANS_JOIN_NOLOCK);
	int err = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	/*
	 * do the qgroup accounting as early as possible
	 */
	err = btrfs_delayed_refs_qgroup_accounting(trans, info);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	/*
	 * the same root has to be passed to start_transaction and
	 * end_transaction.  Subvolume quota depends on this.
	 */
	WARN_ON(trans->root != root);

	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
		err = -EIO;
	assert_qgroups_uptodate(trans);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark, &cached_state)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				   mark, &cached_state, GFP_NOFS);
		cached_state = NULL;
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}
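
/*
 * Note the handshake with btrfs_wait_marked_extents() below: the
 * ranges written above are converted from @mark to EXTENT_NEED_WAIT,
 * and the wait side clears EXTENT_NEED_WAIT again once the pages have
 * been waited on.
 */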

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	struct extent_state *cached_state = NULL;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT, &cached_state)) {
		clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
				 0, 0, &cached_state, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious.  Any of the
 * failures will cause the file system to go offline.  We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	WARN_ON(ret);
	ret = btrfs_run_dev_replace(trans, root->fs_info);
	WARN_ON(ret);

	ret = btrfs_run_qgroups(trans, root->fs_info);
	BUG_ON(ret);

	/* run_qgroups might have added some more refs */
	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	btrfs_after_dev_replace_commit(fs_info);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the dirty fs-tree roots in the tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root);

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;

		if (btrfs_defrag_cancelled(root->fs_info)) {
			printk(KERN_DEBUG "btrfs: defrag_root cancelled\n");
			ret = -EAGAIN;
			break;
		}
	}
	root->defrag_running = 0;
	return ret;
}
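
/*
 * Snapshot creation (below) runs inside the commit because the new
 * root item must point at a stable copy of the source root: the source
 * root node is COWed and copied once, the copy is inserted as the new
 * root item, and only then is the directory entry wired up in the
 * parent.
 */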

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct btrfs_path *path;
	struct btrfs_dir_item *dir_item;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	struct timespec cur_time = CURRENT_TIME;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;
	uuid_le new_uuid;

	path = btrfs_alloc_path();
	if (!path) {
		ret = pending->error = -ENOMEM;
		goto path_alloc_fail;
	}

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = pending->error = -ENOMEM;
		goto root_item_alloc_fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add(root, &pending->block_rsv,
					  to_reserve,
					  BTRFS_RESERVE_NO_FLUSH);
		if (ret) {
			pending->error = ret;
			goto no_free_objectid;
		}
	}

	ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
				   objectid, pending->inherit);
	if (ret) {
		pending->error = ret;
		goto no_free_objectid;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	rsv = trans->block_rsv;
	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */

	/* check if there is a file/dir which has the same name. */
	dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
					 btrfs_ino(parent_inode),
					 dentry->d_name.name,
					 dentry->d_name.len, 0);
	if (dir_item != NULL && !IS_ERR(dir_item)) {
		pending->error = -EEXIST;
		goto fail;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}
	btrfs_release_path(path);

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {	/* Transaction aborted */
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	btrfs_set_root_generation_v2(new_root_item,
			trans->transid);
	uuid_le_gen(&new_uuid);
	memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
	memcpy(new_root_item->parent_uuid, root->root_item.uuid,
			BTRFS_UUID_SIZE);
	new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
	new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
	btrfs_set_root_otransid(new_root_item, trans->transid);
	memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
	memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
	btrfs_set_root_stransid(new_root_item, 0);
	btrfs_set_root_rtransid(new_root_item, 0);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	/* We already checked the name at the beginning, so it is impossible. */
	BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		goto fail;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
	ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
fail:
	dput(parent);
	trans->block_rsv = rsv;
no_free_objectid:
	kfree(new_root_item);
root_item_alloc_fail:
	btrfs_free_path(path);
path_alloc_fail:
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;

	list_for_each_entry(pending, head, list)
		create_pending_snapshot(trans, fs_info, pending);
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously.  once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work);

	/*
	 * We've got freeze protection passed with the transaction.
	 * Tell lockdep about it.
	 */
	if (ac->newtrans->type < TRANS_JOIN_NOLOCK)
		rwsem_acquire_read(
		     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
		     0, 1, _THIS_IP_);

	current->journal_info = ac->newtrans;

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (trans->type < TRANS_JOIN_NOLOCK)
		rwsem_release(
			&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
			1, _THIS_IP_);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}


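/*
 * Tear down a transaction that failed mid-commit: unhook it from
 * fs_info and drop both outstanding references, the handle's own and
 * the one that a successful commit would have consumed.
 */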
static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		root->fs_info->running_transaction = NULL;
		root->fs_info->trans_no_join = 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root)
{
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
	int snap_pending = 0;
	int ret;

	if (!flush_on_commit) {
		spin_lock(&root->fs_info->trans_lock);
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (flush_on_commit || snap_pending) {
		ret = btrfs_start_delalloc_inodes(root, 1);
		if (ret)
			return ret;
		btrfs_wait_ordered_extents(root, 1);
	}

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		return ret;

	/*
	 * running the delayed items may have added new refs.  account
	 * them now so that they hinder processing of more delayed refs
	 * as little as possible.
	 */
	btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

	/*
	 * rename doesn't use btrfs_join_transaction, so once we set the
	 * transaction to blocked above, we aren't going to get any new
	 * ordered operations.  We can safely run it here and know for
	 * sure that nothing new will be added to the list.
	 */
	ret = btrfs_run_ordered_operations(trans, root, 1);

	return ret;
}

/*
 * btrfs_transaction state sequence:
 * in_commit = 0, blocked = 0  (initial)
 * in_commit = 1, blocked = 1
 * blocked = 0
 * commit_done = 1
 */
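/*
 * Commit outline: flush the pending work while other writers drain,
 * block new joins via trans_no_join, create the pending snapshots,
 * commit the fs roots and cow-only roots under tree_log_mutex, switch
 * the commit roots, write the dirty btree pages and the super block,
 * and finally wake waiters and drop the transaction's references.
 */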
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();

	ret = btrfs_run_ordered_operations(trans, root, 0);
	if (ret) {
		btrfs_abort_transaction(trans, root, ret);
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return ret;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (!btrfs_test_opt(root, SSD) &&
	    (now < cur_trans->start_time || now - cur_trans->start_time < 1))
		should_grow = 1;

	do {
		joined = cur_trans->num_joined;

		WARN_ON(cur_trans != trans->transaction);

		ret = btrfs_flush_all_pending_stuffs(trans, root);
		if (ret)
			goto cleanup_transaction;

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = btrfs_flush_all_pending_stuffs(trans, root);
	if (ret)
		goto cleanup_transaction;

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction.  We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto cleanup_transaction;
	}
	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with.  Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * the nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction.");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	if (trans->type < TRANS_JOIN_NOLOCK)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		int ret;

		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
		BUG_ON(ret < 0);
	}
	return 0;
}