Btrfs: change core code of btrfs to support the device replace operations
fs/btrfs/transaction.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  *  Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/fs.h>
20 #include <linux/slab.h>
21 #include <linux/sched.h>
22 #include <linux/writeback.h>
23 #include <linux/pagemap.h>
24 #include <linux/blkdev.h>
25 #include <linux/uuid.h>
26 #include "ctree.h"
27 #include "disk-io.h"
28 #include "transaction.h"
29 #include "locking.h"
30 #include "tree-log.h"
31 #include "inode-map.h"
32 #include "volumes.h"
33 #include "dev-replace.h"
34
35 #define BTRFS_ROOT_TRANS_TAG 0
36
37 void put_transaction(struct btrfs_transaction *transaction)
38 {
39 WARN_ON(atomic_read(&transaction->use_count) == 0);
40 if (atomic_dec_and_test(&transaction->use_count)) {
41 BUG_ON(!list_empty(&transaction->list));
42 WARN_ON(transaction->delayed_refs.root.rb_node);
43 memset(transaction, 0, sizeof(*transaction));
44 kmem_cache_free(btrfs_transaction_cachep, transaction);
45 }
46 }
47
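/*
 * drop our ref on the old commit root and record the current root
 * node as the new commit root
 */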
48 static noinline void switch_commit_root(struct btrfs_root *root)
49 {
50 free_extent_buffer(root->commit_root);
51 root->commit_root = btrfs_root_node(root);
52 }
53
54 /*
55 * either allocate a new transaction or hop into the existing one
56 */
57 static noinline int join_transaction(struct btrfs_root *root, int type)
58 {
59 struct btrfs_transaction *cur_trans;
60 struct btrfs_fs_info *fs_info = root->fs_info;
61
62 spin_lock(&fs_info->trans_lock);
63 loop:
64 /* The file system has been taken offline. No new transactions. */
65 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
66 spin_unlock(&fs_info->trans_lock);
67 return -EROFS;
68 }
69
70 if (fs_info->trans_no_join) {
71 /*
72 * If we are JOIN_NOLOCK we're already committing a current
73 * transaction, we just need a handle to deal with something
74 * when committing the transaction, such as inode cache and
75 * space cache. It is a special case.
76 */
77 if (type != TRANS_JOIN_NOLOCK) {
78 spin_unlock(&fs_info->trans_lock);
79 return -EBUSY;
80 }
81 }
82
83 cur_trans = fs_info->running_transaction;
84 if (cur_trans) {
85 if (cur_trans->aborted) {
86 spin_unlock(&fs_info->trans_lock);
87 return cur_trans->aborted;
88 }
89 atomic_inc(&cur_trans->use_count);
90 atomic_inc(&cur_trans->num_writers);
91 cur_trans->num_joined++;
92 spin_unlock(&fs_info->trans_lock);
93 return 0;
94 }
95 spin_unlock(&fs_info->trans_lock);
96
97 /*
98 * If we are ATTACH, we just want to catch the current transaction,
99 * and commit it. If there is no transaction, just return ENOENT.
100 */
101 if (type == TRANS_ATTACH)
102 return -ENOENT;
103
104 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
105 if (!cur_trans)
106 return -ENOMEM;
107
108 spin_lock(&fs_info->trans_lock);
109 if (fs_info->running_transaction) {
110 /*
111 * someone started a transaction after we unlocked. Make sure
112 * to redo the trans_no_join checks above
113 */
114 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
115 cur_trans = fs_info->running_transaction;
116 goto loop;
117 } else if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
118 spin_unlock(&fs_info->trans_lock);
119 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
120 return -EROFS;
121 }
122
123 atomic_set(&cur_trans->num_writers, 1);
124 cur_trans->num_joined = 0;
125 init_waitqueue_head(&cur_trans->writer_wait);
126 init_waitqueue_head(&cur_trans->commit_wait);
127 cur_trans->in_commit = 0;
128 cur_trans->blocked = 0;
129 /*
130 * One for this trans handle, one so it will live on until we
131 * commit the transaction.
132 */
133 atomic_set(&cur_trans->use_count, 2);
134 cur_trans->commit_done = 0;
135 cur_trans->start_time = get_seconds();
136
137 cur_trans->delayed_refs.root = RB_ROOT;
138 cur_trans->delayed_refs.num_entries = 0;
139 cur_trans->delayed_refs.num_heads_ready = 0;
140 cur_trans->delayed_refs.num_heads = 0;
141 cur_trans->delayed_refs.flushing = 0;
142 cur_trans->delayed_refs.run_delayed_start = 0;
143
144 /*
145 * although the tree mod log is per file system and not per transaction,
146 * the log must never go across transaction boundaries.
147 */
148 smp_mb();
149 if (!list_empty(&fs_info->tree_mod_seq_list))
150 WARN(1, KERN_ERR "btrfs: tree_mod_seq_list not empty when "
151 "creating a fresh transaction\n");
152 if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
153 WARN(1, KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
154 "creating a fresh transaction\n");
155 atomic_set(&fs_info->tree_mod_seq, 0);
156
157 spin_lock_init(&cur_trans->commit_lock);
158 spin_lock_init(&cur_trans->delayed_refs.lock);
159
160 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
161 list_add_tail(&cur_trans->list, &fs_info->trans_list);
162 extent_io_tree_init(&cur_trans->dirty_pages,
163 fs_info->btree_inode->i_mapping);
164 fs_info->generation++;
165 cur_trans->transid = fs_info->generation;
166 fs_info->running_transaction = cur_trans;
167 cur_trans->aborted = 0;
168 spin_unlock(&fs_info->trans_lock);
169
170 return 0;
171 }
172
173 /*
174 * this does all the record keeping required to make sure that a reference
175 * counted root is properly recorded in a given transaction. This is required
176 * to make sure the old root from before we joined the transaction is deleted
177 * when the transaction commits
178 */
179 static int record_root_in_trans(struct btrfs_trans_handle *trans,
180 struct btrfs_root *root)
181 {
182 if (root->ref_cows && root->last_trans < trans->transid) {
183 WARN_ON(root == root->fs_info->extent_root);
184 WARN_ON(root->commit_root != root->node);
185
186 /*
187 * see below for in_trans_setup usage rules
188 * we have the reloc mutex held now, so there
189 * is only one writer in this function
190 */
191 root->in_trans_setup = 1;
192
193 /* make sure readers find in_trans_setup before
194 * they find our root->last_trans update
195 */
196 smp_wmb();
197
198 spin_lock(&root->fs_info->fs_roots_radix_lock);
199 if (root->last_trans == trans->transid) {
200 spin_unlock(&root->fs_info->fs_roots_radix_lock);
201 return 0;
202 }
203 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
204 (unsigned long)root->root_key.objectid,
205 BTRFS_ROOT_TRANS_TAG);
206 spin_unlock(&root->fs_info->fs_roots_radix_lock);
207 root->last_trans = trans->transid;
208
209 /* this is pretty tricky. We don't want to
210 * take the relocation lock in btrfs_record_root_in_trans
211 * unless we're really doing the first setup for this root in
212 * this transaction.
213 *
214 * Normally we'd use root->last_trans as a flag to decide
215 * if we want to take the expensive mutex.
216 *
217 * But, we have to set root->last_trans before we
218 * init the relocation root, otherwise, we trip over warnings
219 * in ctree.c. The solution used here is to flag ourselves
220 * with root->in_trans_setup. When this is 1, we're still
221 * fixing up the reloc trees and everyone must wait.
222 *
223 * When this is zero, they can trust root->last_trans and fly
224 * through btrfs_record_root_in_trans without having to take the
225 * lock. smp_wmb() makes sure that all the writes above are
226 * done before we pop in the zero below
227 */
228 btrfs_init_reloc_root(trans, root);
229 smp_wmb();
230 root->in_trans_setup = 0;
231 }
232 return 0;
233 }
234
235
236 int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
237 struct btrfs_root *root)
238 {
239 if (!root->ref_cows)
240 return 0;
241
242 /*
243 * see record_root_in_trans for comments about in_trans_setup usage
244 * and barriers
245 */
246 smp_rmb();
247 if (root->last_trans == trans->transid &&
248 !root->in_trans_setup)
249 return 0;
250
251 mutex_lock(&root->fs_info->reloc_mutex);
252 record_root_in_trans(trans, root);
253 mutex_unlock(&root->fs_info->reloc_mutex);
254
255 return 0;
256 }
257
258 /* wait for commit against the current transaction to become unblocked.
259  * When this is done, it is safe to start a new transaction, but the current
260 * transaction might not be fully on disk.
261 */
262 static void wait_current_trans(struct btrfs_root *root)
263 {
264 struct btrfs_transaction *cur_trans;
265
266 spin_lock(&root->fs_info->trans_lock);
267 cur_trans = root->fs_info->running_transaction;
268 if (cur_trans && cur_trans->blocked) {
269 atomic_inc(&cur_trans->use_count);
270 spin_unlock(&root->fs_info->trans_lock);
271
272 wait_event(root->fs_info->transaction_wait,
273 !cur_trans->blocked);
274 put_transaction(cur_trans);
275 } else {
276 spin_unlock(&root->fs_info->trans_lock);
277 }
278 }
279
280 static int may_wait_transaction(struct btrfs_root *root, int type)
281 {
282 if (root->fs_info->log_root_recovering)
283 return 0;
284
285 if (type == TRANS_USERSPACE)
286 return 1;
287
288 if (type == TRANS_START &&
289 !atomic_read(&root->fs_info->open_ioctl_trans))
290 return 1;
291
292 return 0;
293 }
294
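/*
 * common helper for the transaction start variants below: reserve
 * space for num_items worth of tree operations (honouring the given
 * flush mode) and then join or create a transaction of the given type
 */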
295 static struct btrfs_trans_handle *
296 start_transaction(struct btrfs_root *root, u64 num_items, int type,
297 enum btrfs_reserve_flush_enum flush)
298 {
299 struct btrfs_trans_handle *h;
300 struct btrfs_transaction *cur_trans;
301 u64 num_bytes = 0;
302 int ret;
303 u64 qgroup_reserved = 0;
304
305 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
306 return ERR_PTR(-EROFS);
307
308 if (current->journal_info) {
309 WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
310 h = current->journal_info;
311 h->use_count++;
312 WARN_ON(h->use_count > 2);
313 h->orig_rsv = h->block_rsv;
314 h->block_rsv = NULL;
315 goto got_it;
316 }
317
318 /*
319 * Do the reservation before we join the transaction so we can do all
320 * the appropriate flushing if need be.
321 */
322 if (num_items > 0 && root != root->fs_info->chunk_root) {
323 if (root->fs_info->quota_enabled &&
324 is_fstree(root->root_key.objectid)) {
325 qgroup_reserved = num_items * root->leafsize;
326 ret = btrfs_qgroup_reserve(root, qgroup_reserved);
327 if (ret)
328 return ERR_PTR(ret);
329 }
330
331 num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
332 ret = btrfs_block_rsv_add(root,
333 &root->fs_info->trans_block_rsv,
334 num_bytes, flush);
335 if (ret)
336 return ERR_PTR(ret);
337 }
338 again:
339 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
340 if (!h)
341 return ERR_PTR(-ENOMEM);
342
343 /*
344 * If we are JOIN_NOLOCK we're already committing a transaction and
345 * waiting on this guy, so we don't need to do the sb_start_intwrite
346 * because we're already holding a ref. We need this because we could
347  * have raced in and done an fsync() on a file which can kick a commit
348 * and then we deadlock with somebody doing a freeze.
349 *
350 * If we are ATTACH, it means we just want to catch the current
351 * transaction and commit it, so we needn't do sb_start_intwrite().
352 */
353 if (type < TRANS_JOIN_NOLOCK)
354 sb_start_intwrite(root->fs_info->sb);
355
356 if (may_wait_transaction(root, type))
357 wait_current_trans(root);
358
359 do {
360 ret = join_transaction(root, type);
361 if (ret == -EBUSY)
362 wait_current_trans(root);
363 } while (ret == -EBUSY);
364
365 if (ret < 0) {
366 /* We must get the transaction if we are JOIN_NOLOCK. */
367 BUG_ON(type == TRANS_JOIN_NOLOCK);
368
369 if (type < TRANS_JOIN_NOLOCK)
370 sb_end_intwrite(root->fs_info->sb);
371 kmem_cache_free(btrfs_trans_handle_cachep, h);
372 return ERR_PTR(ret);
373 }
374
375 cur_trans = root->fs_info->running_transaction;
376
377 h->transid = cur_trans->transid;
378 h->transaction = cur_trans;
379 h->blocks_used = 0;
380 h->bytes_reserved = 0;
381 h->root = root;
382 h->delayed_ref_updates = 0;
383 h->use_count = 1;
384 h->adding_csums = 0;
385 h->block_rsv = NULL;
386 h->orig_rsv = NULL;
387 h->aborted = 0;
388 h->qgroup_reserved = qgroup_reserved;
389 h->delayed_ref_elem.seq = 0;
390 h->type = type;
391 INIT_LIST_HEAD(&h->qgroup_ref_list);
392 INIT_LIST_HEAD(&h->new_bgs);
393
394 smp_mb();
395 if (cur_trans->blocked && may_wait_transaction(root, type)) {
396 btrfs_commit_transaction(h, root);
397 goto again;
398 }
399
400 if (num_bytes) {
401 trace_btrfs_space_reservation(root->fs_info, "transaction",
402 h->transid, num_bytes, 1);
403 h->block_rsv = &root->fs_info->trans_block_rsv;
404 h->bytes_reserved = num_bytes;
405 }
406
407 got_it:
408 btrfs_record_root_in_trans(h, root);
409
410 if (!current->journal_info && type != TRANS_USERSPACE)
411 current->journal_info = h;
412 return h;
413 }
414
415 struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
416 int num_items)
417 {
418 return start_transaction(root, num_items, TRANS_START,
419 BTRFS_RESERVE_FLUSH_ALL);
420 }
421
422 struct btrfs_trans_handle *btrfs_start_transaction_lflush(
423 struct btrfs_root *root, int num_items)
424 {
425 return start_transaction(root, num_items, TRANS_START,
426 BTRFS_RESERVE_FLUSH_LIMIT);
427 }
428
429 struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
430 {
431 return start_transaction(root, 0, TRANS_JOIN, 0);
432 }
433
434 struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
435 {
436 return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
437 }
438
439 struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
440 {
441 return start_transaction(root, 0, TRANS_USERSPACE, 0);
442 }
443
444 struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
445 {
446 return start_transaction(root, 0, TRANS_ATTACH, 0);
447 }
448
449 /* wait for a transaction commit to be fully complete */
450 static noinline void wait_for_commit(struct btrfs_root *root,
451 struct btrfs_transaction *commit)
452 {
453 wait_event(commit->commit_wait, commit->commit_done);
454 }
455
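/*
 * wait for the commit of a specific transid to finish, or, when
 * transid == 0, for the newest committing transaction (if any)
 */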
456 int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
457 {
458 struct btrfs_transaction *cur_trans = NULL, *t;
459 int ret;
460
461 ret = 0;
462 if (transid) {
463 if (transid <= root->fs_info->last_trans_committed)
464 goto out;
465
466 /* find specified transaction */
467 spin_lock(&root->fs_info->trans_lock);
468 list_for_each_entry(t, &root->fs_info->trans_list, list) {
469 if (t->transid == transid) {
470 cur_trans = t;
471 atomic_inc(&cur_trans->use_count);
472 break;
473 }
474 if (t->transid > transid)
475 break;
476 }
477 spin_unlock(&root->fs_info->trans_lock);
478 ret = -EINVAL;
479 if (!cur_trans)
480 goto out; /* bad transid */
481 } else {
482 /* find newest transaction that is committing | committed */
483 spin_lock(&root->fs_info->trans_lock);
484 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
485 list) {
486 if (t->in_commit) {
487 if (t->commit_done)
488 break;
489 cur_trans = t;
490 atomic_inc(&cur_trans->use_count);
491 break;
492 }
493 }
494 spin_unlock(&root->fs_info->trans_lock);
495 if (!cur_trans)
496 goto out; /* nothing committing|committed */
497 }
498
499 wait_for_commit(root, cur_trans);
500
501 put_transaction(cur_trans);
502 ret = 0;
503 out:
504 return ret;
505 }
506
507 void btrfs_throttle(struct btrfs_root *root)
508 {
509 if (!atomic_read(&root->fs_info->open_ioctl_trans))
510 wait_current_trans(root);
511 }
512
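/*
 * heuristic: report that the transaction should end when the global
 * block reserve is running low (btrfs_block_rsv_check decides)
 */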
513 static int should_end_transaction(struct btrfs_trans_handle *trans,
514 struct btrfs_root *root)
515 {
516 int ret;
517
518 ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
519 return ret ? 1 : 0;
520 }
521
522 int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
523 struct btrfs_root *root)
524 {
525 struct btrfs_transaction *cur_trans = trans->transaction;
526 int updates;
527 int err;
528
529 smp_mb();
530 if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
531 return 1;
532
533 updates = trans->delayed_ref_updates;
534 trans->delayed_ref_updates = 0;
535 if (updates) {
536 err = btrfs_run_delayed_refs(trans, root, updates);
537 		if (err) /* a nonzero error code also evaluates as true */
538 return err;
539 }
540
541 return should_end_transaction(trans, root);
542 }
543
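/*
 * drop one use of the handle; the final drop runs a bounded batch of
 * delayed refs, releases the metadata reservation and, when throttling,
 * either commits directly or wakes the transaction kthread
 */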
544 static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
545 struct btrfs_root *root, int throttle)
546 {
547 struct btrfs_transaction *cur_trans = trans->transaction;
548 struct btrfs_fs_info *info = root->fs_info;
549 int count = 0;
550 int lock = (trans->type != TRANS_JOIN_NOLOCK);
551 int err = 0;
552
553 if (--trans->use_count) {
554 trans->block_rsv = trans->orig_rsv;
555 return 0;
556 }
557
558 /*
559 * do the qgroup accounting as early as possible
560 */
561 err = btrfs_delayed_refs_qgroup_accounting(trans, info);
562
563 btrfs_trans_release_metadata(trans, root);
564 trans->block_rsv = NULL;
565 /*
566 * the same root has to be passed to start_transaction and
567 * end_transaction. Subvolume quota depends on this.
568 */
569 WARN_ON(trans->root != root);
570
571 if (trans->qgroup_reserved) {
572 btrfs_qgroup_free(root, trans->qgroup_reserved);
573 trans->qgroup_reserved = 0;
574 }
575
576 if (!list_empty(&trans->new_bgs))
577 btrfs_create_pending_block_groups(trans, root);
578
579 while (count < 2) {
580 unsigned long cur = trans->delayed_ref_updates;
581 trans->delayed_ref_updates = 0;
582 if (cur &&
583 trans->transaction->delayed_refs.num_heads_ready > 64) {
584 trans->delayed_ref_updates = 0;
585 btrfs_run_delayed_refs(trans, root, cur);
586 } else {
587 break;
588 }
589 count++;
590 }
591 btrfs_trans_release_metadata(trans, root);
592 trans->block_rsv = NULL;
593
594 if (!list_empty(&trans->new_bgs))
595 btrfs_create_pending_block_groups(trans, root);
596
597 if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
598 should_end_transaction(trans, root)) {
599 trans->transaction->blocked = 1;
600 smp_wmb();
601 }
602
603 if (lock && cur_trans->blocked && !cur_trans->in_commit) {
604 if (throttle) {
605 /*
606 			 * We may race with somebody else here and end up having
607 * to call end_transaction on ourselves again, so inc
608 * our use_count.
609 */
610 trans->use_count++;
611 return btrfs_commit_transaction(trans, root);
612 } else {
613 wake_up_process(info->transaction_kthread);
614 }
615 }
616
617 if (trans->type < TRANS_JOIN_NOLOCK)
618 sb_end_intwrite(root->fs_info->sb);
619
620 WARN_ON(cur_trans != info->running_transaction);
621 WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
622 atomic_dec(&cur_trans->num_writers);
623
624 smp_mb();
625 if (waitqueue_active(&cur_trans->writer_wait))
626 wake_up(&cur_trans->writer_wait);
627 put_transaction(cur_trans);
628
629 if (current->journal_info == trans)
630 current->journal_info = NULL;
631
632 if (throttle)
633 btrfs_run_delayed_iputs(root);
634
635 if (trans->aborted ||
636 root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
637 err = -EIO;
638 }
639 assert_qgroups_uptodate(trans);
640
641 memset(trans, 0, sizeof(*trans));
642 kmem_cache_free(btrfs_trans_handle_cachep, trans);
643 return err;
644 }
645
646 int btrfs_end_transaction(struct btrfs_trans_handle *trans,
647 struct btrfs_root *root)
648 {
649 int ret;
650
651 ret = __btrfs_end_transaction(trans, root, 0);
652 if (ret)
653 return ret;
654 return 0;
655 }
656
657 int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
658 struct btrfs_root *root)
659 {
660 int ret;
661
662 ret = __btrfs_end_transaction(trans, root, 1);
663 if (ret)
664 return ret;
665 return 0;
666 }
667
668 int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
669 struct btrfs_root *root)
670 {
671 return __btrfs_end_transaction(trans, root, 1);
672 }
673
674 /*
675 * when btree blocks are allocated, they have some corresponding bits set for
676 * them in one of two extent_io trees. This is used to make sure all of
677 * those extents are sent to disk but does not wait on them
678 */
679 int btrfs_write_marked_extents(struct btrfs_root *root,
680 struct extent_io_tree *dirty_pages, int mark)
681 {
682 int err = 0;
683 int werr = 0;
684 struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
685 struct extent_state *cached_state = NULL;
686 u64 start = 0;
687 u64 end;
688
689 while (!find_first_extent_bit(dirty_pages, start, &start, &end,
690 mark, &cached_state)) {
691 convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
692 mark, &cached_state, GFP_NOFS);
693 cached_state = NULL;
694 err = filemap_fdatawrite_range(mapping, start, end);
695 if (err)
696 werr = err;
697 cond_resched();
698 start = end + 1;
699 }
700 if (err)
701 werr = err;
702 return werr;
703 }
704
705 /*
706 * when btree blocks are allocated, they have some corresponding bits set for
707 * them in one of two extent_io trees. This is used to make sure all of
708 * those extents are on disk for transaction or log commit. We wait
709 * on all the pages and clear them from the dirty pages state tree
710 */
711 int btrfs_wait_marked_extents(struct btrfs_root *root,
712 struct extent_io_tree *dirty_pages, int mark)
713 {
714 int err = 0;
715 int werr = 0;
716 struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
717 struct extent_state *cached_state = NULL;
718 u64 start = 0;
719 u64 end;
720
721 while (!find_first_extent_bit(dirty_pages, start, &start, &end,
722 EXTENT_NEED_WAIT, &cached_state)) {
723 clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
724 0, 0, &cached_state, GFP_NOFS);
725 err = filemap_fdatawait_range(mapping, start, end);
726 if (err)
727 werr = err;
728 cond_resched();
729 start = end + 1;
730 }
731 if (err)
732 werr = err;
733 return werr;
734 }
735
736 /*
737 * when btree blocks are allocated, they have some corresponding bits set for
738 * them in one of two extent_io trees. This is used to make sure all of
739 * those extents are on disk for transaction or log commit
740 */
741 int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
742 struct extent_io_tree *dirty_pages, int mark)
743 {
744 int ret;
745 int ret2;
746
747 ret = btrfs_write_marked_extents(root, dirty_pages, mark);
748 ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
749
750 if (ret)
751 return ret;
752 if (ret2)
753 return ret2;
754 return 0;
755 }
756
757 int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
758 struct btrfs_root *root)
759 {
760 if (!trans || !trans->transaction) {
761 struct inode *btree_inode;
762 btree_inode = root->fs_info->btree_inode;
763 return filemap_write_and_wait(btree_inode->i_mapping);
764 }
765 return btrfs_write_and_wait_marked_extents(root,
766 &trans->transaction->dirty_pages,
767 EXTENT_DIRTY);
768 }
769
770 /*
771 * this is used to update the root pointer in the tree of tree roots.
772 *
773 * But, in the case of the extent allocation tree, updating the root
774 * pointer may allocate blocks which may change the root of the extent
775 * allocation tree.
776 *
777 * So, this loops and repeats and makes sure the cowonly root didn't
778 * change while the root pointer was being updated in the metadata.
779 */
780 static int update_cowonly_root(struct btrfs_trans_handle *trans,
781 struct btrfs_root *root)
782 {
783 int ret;
784 u64 old_root_bytenr;
785 u64 old_root_used;
786 struct btrfs_root *tree_root = root->fs_info->tree_root;
787
788 old_root_used = btrfs_root_used(&root->root_item);
789 btrfs_write_dirty_block_groups(trans, root);
790
791 while (1) {
792 old_root_bytenr = btrfs_root_bytenr(&root->root_item);
793 if (old_root_bytenr == root->node->start &&
794 old_root_used == btrfs_root_used(&root->root_item))
795 break;
796
797 btrfs_set_root_node(&root->root_item, root->node);
798 ret = btrfs_update_root(trans, tree_root,
799 &root->root_key,
800 &root->root_item);
801 if (ret)
802 return ret;
803
804 old_root_used = btrfs_root_used(&root->root_item);
805 ret = btrfs_write_dirty_block_groups(trans, root);
806 if (ret)
807 return ret;
808 }
809
810 if (root != root->fs_info->extent_root)
811 switch_commit_root(root);
812
813 return 0;
814 }
815
816 /*
817 * update all the cowonly tree roots on disk
818 *
819 * The error handling in this function may not be obvious. Any of the
820 * failures will cause the file system to go offline. We still need
821 * to clean up the delayed refs.
822 */
823 static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
824 struct btrfs_root *root)
825 {
826 struct btrfs_fs_info *fs_info = root->fs_info;
827 struct list_head *next;
828 struct extent_buffer *eb;
829 int ret;
830
831 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
832 if (ret)
833 return ret;
834
835 eb = btrfs_lock_root_node(fs_info->tree_root);
836 ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
837 0, &eb);
838 btrfs_tree_unlock(eb);
839 free_extent_buffer(eb);
840
841 if (ret)
842 return ret;
843
844 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
845 if (ret)
846 return ret;
847
848 ret = btrfs_run_dev_stats(trans, root->fs_info);
849 WARN_ON(ret);
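	/*
	 * write out the device replace state; part of the device replace
	 * support that this change adds to the commit path
	 */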
850 ret = btrfs_run_dev_replace(trans, root->fs_info);
851 WARN_ON(ret);
852
853 ret = btrfs_run_qgroups(trans, root->fs_info);
854 BUG_ON(ret);
855
856 /* run_qgroups might have added some more refs */
857 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
858 BUG_ON(ret);
859
860 while (!list_empty(&fs_info->dirty_cowonly_roots)) {
861 next = fs_info->dirty_cowonly_roots.next;
862 list_del_init(next);
863 root = list_entry(next, struct btrfs_root, dirty_list);
864
865 ret = update_cowonly_root(trans, root);
866 if (ret)
867 return ret;
868 }
869
870 down_write(&fs_info->extent_commit_sem);
871 switch_commit_root(fs_info->extent_root);
872 up_write(&fs_info->extent_commit_sem);
873
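	/*
	 * post-commit hook for the device replace operation, called once
	 * the commit roots above have been switched
	 */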
874 btrfs_after_dev_replace_commit(fs_info);
875
876 return 0;
877 }
878
879 /*
880  * dead roots are old snapshots that need to be deleted. This adds
881  * the given root to the list of dead roots that need to
882  * be deleted
883 */
884 int btrfs_add_dead_root(struct btrfs_root *root)
885 {
886 spin_lock(&root->fs_info->trans_lock);
887 list_add(&root->root_list, &root->fs_info->dead_roots);
888 spin_unlock(&root->fs_info->trans_lock);
889 return 0;
890 }
891
892 /*
893  * update all the fs roots on disk
894 */
895 static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
896 struct btrfs_root *root)
897 {
898 struct btrfs_root *gang[8];
899 struct btrfs_fs_info *fs_info = root->fs_info;
900 int i;
901 int ret;
902 int err = 0;
903
904 spin_lock(&fs_info->fs_roots_radix_lock);
905 while (1) {
906 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
907 (void **)gang, 0,
908 ARRAY_SIZE(gang),
909 BTRFS_ROOT_TRANS_TAG);
910 if (ret == 0)
911 break;
912 for (i = 0; i < ret; i++) {
913 root = gang[i];
914 radix_tree_tag_clear(&fs_info->fs_roots_radix,
915 (unsigned long)root->root_key.objectid,
916 BTRFS_ROOT_TRANS_TAG);
917 spin_unlock(&fs_info->fs_roots_radix_lock);
918
919 btrfs_free_log(trans, root);
920 btrfs_update_reloc_root(trans, root);
921 btrfs_orphan_commit_root(trans, root);
922
923 btrfs_save_ino_cache(root, trans);
924
925 /* see comments in should_cow_block() */
926 root->force_cow = 0;
927 smp_wmb();
928
929 if (root->commit_root != root->node) {
930 mutex_lock(&root->fs_commit_mutex);
931 switch_commit_root(root);
932 btrfs_unpin_free_ino(root);
933 mutex_unlock(&root->fs_commit_mutex);
934
935 btrfs_set_root_node(&root->root_item,
936 root->node);
937 }
938
939 err = btrfs_update_root(trans, fs_info->tree_root,
940 &root->root_key,
941 &root->root_item);
942 spin_lock(&fs_info->fs_roots_radix_lock);
943 if (err)
944 break;
945 }
946 }
947 spin_unlock(&fs_info->fs_roots_radix_lock);
948 return err;
949 }
950
951 /*
952 * defrag a given btree. If cacheonly == 1, this won't read from the disk,
953 * otherwise every leaf in the btree is read and defragged.
954 */
955 int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
956 {
957 struct btrfs_fs_info *info = root->fs_info;
958 struct btrfs_trans_handle *trans;
959 int ret;
960
961 if (xchg(&root->defrag_running, 1))
962 return 0;
963
964 while (1) {
965 trans = btrfs_start_transaction(root, 0);
966 if (IS_ERR(trans))
967 return PTR_ERR(trans);
968
969 ret = btrfs_defrag_leaves(trans, root, cacheonly);
970
971 btrfs_end_transaction(trans, root);
972 btrfs_btree_balance_dirty(info->tree_root);
973 cond_resched();
974
975 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
976 break;
977 }
978 root->defrag_running = 0;
979 return ret;
980 }
981
982 /*
983 * new snapshots need to be created at a very specific time in the
984 * transaction commit. This does the actual creation
985 */
986 static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
987 struct btrfs_fs_info *fs_info,
988 struct btrfs_pending_snapshot *pending)
989 {
990 struct btrfs_key key;
991 struct btrfs_root_item *new_root_item;
992 struct btrfs_root *tree_root = fs_info->tree_root;
993 struct btrfs_root *root = pending->root;
994 struct btrfs_root *parent_root;
995 struct btrfs_block_rsv *rsv;
996 struct inode *parent_inode;
997 struct btrfs_path *path;
998 struct btrfs_dir_item *dir_item;
999 struct dentry *parent;
1000 struct dentry *dentry;
1001 struct extent_buffer *tmp;
1002 struct extent_buffer *old;
1003 struct timespec cur_time = CURRENT_TIME;
1004 int ret;
1005 u64 to_reserve = 0;
1006 u64 index = 0;
1007 u64 objectid;
1008 u64 root_flags;
1009 uuid_le new_uuid;
1010
1011 path = btrfs_alloc_path();
1012 if (!path) {
1013 ret = pending->error = -ENOMEM;
1014 goto path_alloc_fail;
1015 }
1016
1017 new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
1018 if (!new_root_item) {
1019 ret = pending->error = -ENOMEM;
1020 goto root_item_alloc_fail;
1021 }
1022
1023 ret = btrfs_find_free_objectid(tree_root, &objectid);
1024 if (ret) {
1025 pending->error = ret;
1026 goto no_free_objectid;
1027 }
1028
1029 btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);
1030
1031 if (to_reserve > 0) {
1032 ret = btrfs_block_rsv_add(root, &pending->block_rsv,
1033 to_reserve,
1034 BTRFS_RESERVE_NO_FLUSH);
1035 if (ret) {
1036 pending->error = ret;
1037 goto no_free_objectid;
1038 }
1039 }
1040
1041 ret = btrfs_qgroup_inherit(trans, fs_info, root->root_key.objectid,
1042 objectid, pending->inherit);
1043 if (ret) {
1044 pending->error = ret;
1045 goto no_free_objectid;
1046 }
1047
1048 key.objectid = objectid;
1049 key.offset = (u64)-1;
1050 key.type = BTRFS_ROOT_ITEM_KEY;
1051
1052 rsv = trans->block_rsv;
1053 trans->block_rsv = &pending->block_rsv;
1054
1055 dentry = pending->dentry;
1056 parent = dget_parent(dentry);
1057 parent_inode = parent->d_inode;
1058 parent_root = BTRFS_I(parent_inode)->root;
1059 record_root_in_trans(trans, parent_root);
1060
1061 /*
1062 * insert the directory item
1063 */
1064 ret = btrfs_set_inode_index(parent_inode, &index);
1065 BUG_ON(ret); /* -ENOMEM */
1066
1067 /* check if there is a file/dir which has the same name. */
1068 dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
1069 btrfs_ino(parent_inode),
1070 dentry->d_name.name,
1071 dentry->d_name.len, 0);
1072 if (dir_item != NULL && !IS_ERR(dir_item)) {
1073 pending->error = -EEXIST;
1074 goto fail;
1075 } else if (IS_ERR(dir_item)) {
1076 ret = PTR_ERR(dir_item);
1077 btrfs_abort_transaction(trans, root, ret);
1078 goto fail;
1079 }
1080 btrfs_release_path(path);
1081
1082 /*
1083 * pull in the delayed directory update
1084 * and the delayed inode item
1085 * otherwise we corrupt the FS during
1086 * snapshot
1087 */
1088 ret = btrfs_run_delayed_items(trans, root);
1089 if (ret) { /* Transaction aborted */
1090 btrfs_abort_transaction(trans, root, ret);
1091 goto fail;
1092 }
1093
1094 record_root_in_trans(trans, root);
1095 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
1096 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
1097 btrfs_check_and_init_root_item(new_root_item);
1098
1099 root_flags = btrfs_root_flags(new_root_item);
1100 if (pending->readonly)
1101 root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
1102 else
1103 root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
1104 btrfs_set_root_flags(new_root_item, root_flags);
1105
1106 btrfs_set_root_generation_v2(new_root_item,
1107 trans->transid);
1108 uuid_le_gen(&new_uuid);
1109 memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
1110 memcpy(new_root_item->parent_uuid, root->root_item.uuid,
1111 BTRFS_UUID_SIZE);
1112 new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec);
1113 new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec);
1114 btrfs_set_root_otransid(new_root_item, trans->transid);
1115 memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
1116 memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
1117 btrfs_set_root_stransid(new_root_item, 0);
1118 btrfs_set_root_rtransid(new_root_item, 0);
1119
1120 old = btrfs_lock_root_node(root);
1121 ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
1122 if (ret) {
1123 btrfs_tree_unlock(old);
1124 free_extent_buffer(old);
1125 btrfs_abort_transaction(trans, root, ret);
1126 goto fail;
1127 }
1128
1129 btrfs_set_lock_blocking(old);
1130
1131 ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
1132 /* clean up in any case */
1133 btrfs_tree_unlock(old);
1134 free_extent_buffer(old);
1135 if (ret) {
1136 btrfs_abort_transaction(trans, root, ret);
1137 goto fail;
1138 }
1139
1140 /* see comments in should_cow_block() */
1141 root->force_cow = 1;
1142 smp_wmb();
1143
1144 btrfs_set_root_node(new_root_item, tmp);
1145 /* record when the snapshot was created in key.offset */
1146 key.offset = trans->transid;
1147 ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
1148 btrfs_tree_unlock(tmp);
1149 free_extent_buffer(tmp);
1150 if (ret) {
1151 btrfs_abort_transaction(trans, root, ret);
1152 goto fail;
1153 }
1154
1155 /*
1156 * insert root back/forward references
1157 */
1158 ret = btrfs_add_root_ref(trans, tree_root, objectid,
1159 parent_root->root_key.objectid,
1160 btrfs_ino(parent_inode), index,
1161 dentry->d_name.name, dentry->d_name.len);
1162 if (ret) {
1163 btrfs_abort_transaction(trans, root, ret);
1164 goto fail;
1165 }
1166
1167 key.offset = (u64)-1;
1168 pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
1169 if (IS_ERR(pending->snap)) {
1170 ret = PTR_ERR(pending->snap);
1171 btrfs_abort_transaction(trans, root, ret);
1172 goto fail;
1173 }
1174
1175 ret = btrfs_reloc_post_snapshot(trans, pending);
1176 if (ret) {
1177 btrfs_abort_transaction(trans, root, ret);
1178 goto fail;
1179 }
1180
1181 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1182 if (ret) {
1183 btrfs_abort_transaction(trans, root, ret);
1184 goto fail;
1185 }
1186
1187 ret = btrfs_insert_dir_item(trans, parent_root,
1188 dentry->d_name.name, dentry->d_name.len,
1189 parent_inode, &key,
1190 BTRFS_FT_DIR, index);
1191 	/* We have checked the name at the beginning, so -EEXIST is impossible. */
1192 BUG_ON(ret == -EEXIST);
1193 if (ret) {
1194 btrfs_abort_transaction(trans, root, ret);
1195 goto fail;
1196 }
1197
1198 btrfs_i_size_write(parent_inode, parent_inode->i_size +
1199 dentry->d_name.len * 2);
1200 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1201 ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
1202 if (ret)
1203 btrfs_abort_transaction(trans, root, ret);
1204 fail:
1205 dput(parent);
1206 trans->block_rsv = rsv;
1207 no_free_objectid:
1208 kfree(new_root_item);
1209 root_item_alloc_fail:
1210 btrfs_free_path(path);
1211 path_alloc_fail:
1212 btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
1213 return ret;
1214 }
1215
1216 /*
1217 * create all the snapshots we've scheduled for creation
1218 */
1219 static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
1220 struct btrfs_fs_info *fs_info)
1221 {
1222 struct btrfs_pending_snapshot *pending;
1223 struct list_head *head = &trans->transaction->pending_snapshots;
1224
1225 list_for_each_entry(pending, head, list)
1226 create_pending_snapshot(trans, fs_info, pending);
1227 return 0;
1228 }
1229
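/*
 * copy the latest chunk root and tree root pointers (bytenr,
 * generation, level) into the in-memory copy of the superblock
 */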
1230 static void update_super_roots(struct btrfs_root *root)
1231 {
1232 struct btrfs_root_item *root_item;
1233 struct btrfs_super_block *super;
1234
1235 super = root->fs_info->super_copy;
1236
1237 root_item = &root->fs_info->chunk_root->root_item;
1238 super->chunk_root = root_item->bytenr;
1239 super->chunk_root_generation = root_item->generation;
1240 super->chunk_root_level = root_item->level;
1241
1242 root_item = &root->fs_info->tree_root->root_item;
1243 super->root = root_item->bytenr;
1244 super->generation = root_item->generation;
1245 super->root_level = root_item->level;
1246 if (btrfs_test_opt(root, SPACE_CACHE))
1247 super->cache_generation = root_item->generation;
1248 }
1249
1250 int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
1251 {
1252 int ret = 0;
1253 spin_lock(&info->trans_lock);
1254 if (info->running_transaction)
1255 ret = info->running_transaction->in_commit;
1256 spin_unlock(&info->trans_lock);
1257 return ret;
1258 }
1259
1260 int btrfs_transaction_blocked(struct btrfs_fs_info *info)
1261 {
1262 int ret = 0;
1263 spin_lock(&info->trans_lock);
1264 if (info->running_transaction)
1265 ret = info->running_transaction->blocked;
1266 spin_unlock(&info->trans_lock);
1267 return ret;
1268 }
1269
1270 /*
1271 * wait for the current transaction commit to start and block subsequent
1272 * transaction joins
1273 */
1274 static void wait_current_trans_commit_start(struct btrfs_root *root,
1275 struct btrfs_transaction *trans)
1276 {
1277 wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
1278 }
1279
1280 /*
1281 * wait for the current transaction to start and then become unblocked.
1282 * caller holds ref.
1283 */
1284 static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1285 struct btrfs_transaction *trans)
1286 {
1287 wait_event(root->fs_info->transaction_wait,
1288 trans->commit_done || (trans->in_commit && !trans->blocked));
1289 }
1290
1291 /*
1292 * commit transactions asynchronously. once btrfs_commit_transaction_async
1293 * returns, any subsequent transaction will not be allowed to join.
1294 */
1295 struct btrfs_async_commit {
1296 struct btrfs_trans_handle *newtrans;
1297 struct btrfs_root *root;
1298 struct delayed_work work;
1299 };
1300
1301 static void do_async_commit(struct work_struct *work)
1302 {
1303 struct btrfs_async_commit *ac =
1304 container_of(work, struct btrfs_async_commit, work.work);
1305
1306 /*
1307 * We've got freeze protection passed with the transaction.
1308 * Tell lockdep about it.
1309 */
1310 rwsem_acquire_read(
1311 &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1312 0, 1, _THIS_IP_);
1313
1314 current->journal_info = ac->newtrans;
1315
1316 btrfs_commit_transaction(ac->newtrans, ac->root);
1317 kfree(ac);
1318 }
1319
1320 int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1321 struct btrfs_root *root,
1322 int wait_for_unblock)
1323 {
1324 struct btrfs_async_commit *ac;
1325 struct btrfs_transaction *cur_trans;
1326
1327 ac = kmalloc(sizeof(*ac), GFP_NOFS);
1328 if (!ac)
1329 return -ENOMEM;
1330
1331 INIT_DELAYED_WORK(&ac->work, do_async_commit);
1332 ac->root = root;
1333 ac->newtrans = btrfs_join_transaction(root);
1334 if (IS_ERR(ac->newtrans)) {
1335 int err = PTR_ERR(ac->newtrans);
1336 kfree(ac);
1337 return err;
1338 }
1339
1340 /* take transaction reference */
1341 cur_trans = trans->transaction;
1342 atomic_inc(&cur_trans->use_count);
1343
1344 btrfs_end_transaction(trans, root);
1345
1346 /*
1347 * Tell lockdep we've released the freeze rwsem, since the
1348 * async commit thread will be the one to unlock it.
1349 */
1350 rwsem_release(&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
1351 1, _THIS_IP_);
1352
1353 schedule_delayed_work(&ac->work, 0);
1354
1355 /* wait for transaction to start and unblock */
1356 if (wait_for_unblock)
1357 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1358 else
1359 wait_current_trans_commit_start(root, cur_trans);
1360
1361 if (current->journal_info == trans)
1362 current->journal_info = NULL;
1363
1364 put_transaction(cur_trans);
1365 return 0;
1366 }
1367
1368
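/*
 * error path for a failed commit: abort the handle, unhook the
 * transaction from the fs_info and free everything attached to it
 */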
1369 static void cleanup_transaction(struct btrfs_trans_handle *trans,
1370 struct btrfs_root *root, int err)
1371 {
1372 struct btrfs_transaction *cur_trans = trans->transaction;
1373
1374 WARN_ON(trans->use_count > 1);
1375
1376 btrfs_abort_transaction(trans, root, err);
1377
1378 spin_lock(&root->fs_info->trans_lock);
1379 list_del_init(&cur_trans->list);
1380 if (cur_trans == root->fs_info->running_transaction) {
1381 root->fs_info->running_transaction = NULL;
1382 root->fs_info->trans_no_join = 0;
1383 }
1384 spin_unlock(&root->fs_info->trans_lock);
1385
1386 btrfs_cleanup_one_transaction(trans->transaction, root);
1387
1388 put_transaction(cur_trans);
1389 put_transaction(cur_trans);
1390
1391 trace_btrfs_transaction_commit(root);
1392
1393 btrfs_scrub_continue(root);
1394
1395 if (current->journal_info == trans)
1396 current->journal_info = NULL;
1397
1398 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1399 }
1400
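/*
 * flush the work that must reach disk before the commit proceeds:
 * delalloc and ordered extents when a snapshot is pending (or
 * flushoncommit is set), plus delayed items and ordered operations
 */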
1401 static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
1402 struct btrfs_root *root)
1403 {
1404 int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
1405 int snap_pending = 0;
1406 int ret;
1407
1408 if (!flush_on_commit) {
1409 spin_lock(&root->fs_info->trans_lock);
1410 if (!list_empty(&trans->transaction->pending_snapshots))
1411 snap_pending = 1;
1412 spin_unlock(&root->fs_info->trans_lock);
1413 }
1414
1415 if (flush_on_commit || snap_pending) {
1416 btrfs_start_delalloc_inodes(root, 1);
1417 btrfs_wait_ordered_extents(root, 1);
1418 }
1419
1420 ret = btrfs_run_delayed_items(trans, root);
1421 if (ret)
1422 return ret;
1423
1424 /*
1425 * running the delayed items may have added new refs. account
1426 * them now so that they hinder processing of more delayed refs
1427 * as little as possible.
1428 */
1429 btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);
1430
1431 /*
1432 	 * rename doesn't use btrfs_join_transaction, so once we
1433 	 * set the transaction to blocked above, we aren't going
1434 	 * to get any new ordered operations. We can safely run
1435 	 * it here and know for sure that nothing new will be added
1436 * to the list
1437 */
1438 btrfs_run_ordered_operations(root, 1);
1439
1440 return 0;
1441 }
1442
1443 /*
1444 * btrfs_transaction state sequence:
1445 * in_commit = 0, blocked = 0 (initial)
1446 * in_commit = 1, blocked = 1
1447 * blocked = 0
1448 * commit_done = 1
1449 */
1450 int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1451 struct btrfs_root *root)
1452 {
1453 unsigned long joined = 0;
1454 struct btrfs_transaction *cur_trans = trans->transaction;
1455 struct btrfs_transaction *prev_trans = NULL;
1456 DEFINE_WAIT(wait);
1457 int ret;
1458 int should_grow = 0;
1459 unsigned long now = get_seconds();
1460
1461 ret = btrfs_run_ordered_operations(root, 0);
1462 if (ret) {
1463 btrfs_abort_transaction(trans, root, ret);
1464 goto cleanup_transaction;
1465 }
1466
1467 if (cur_trans->aborted) {
1468 ret = cur_trans->aborted;
1469 goto cleanup_transaction;
1470 }
1471
1472 /* make a pass through all the delayed refs we have so far
1473 	 * any running procs may add more while we are here
1474 */
1475 ret = btrfs_run_delayed_refs(trans, root, 0);
1476 if (ret)
1477 goto cleanup_transaction;
1478
1479 btrfs_trans_release_metadata(trans, root);
1480 trans->block_rsv = NULL;
1481
1482 cur_trans = trans->transaction;
1483
1484 /*
1485 * set the flushing flag so procs in this transaction have to
1486 * start sending their work down.
1487 */
1488 cur_trans->delayed_refs.flushing = 1;
1489
1490 if (!list_empty(&trans->new_bgs))
1491 btrfs_create_pending_block_groups(trans, root);
1492
1493 ret = btrfs_run_delayed_refs(trans, root, 0);
1494 if (ret)
1495 goto cleanup_transaction;
1496
1497 spin_lock(&cur_trans->commit_lock);
1498 if (cur_trans->in_commit) {
1499 spin_unlock(&cur_trans->commit_lock);
1500 atomic_inc(&cur_trans->use_count);
1501 ret = btrfs_end_transaction(trans, root);
1502
1503 wait_for_commit(root, cur_trans);
1504
1505 put_transaction(cur_trans);
1506
1507 return ret;
1508 }
1509
1510 trans->transaction->in_commit = 1;
1511 trans->transaction->blocked = 1;
1512 spin_unlock(&cur_trans->commit_lock);
1513 wake_up(&root->fs_info->transaction_blocked_wait);
1514
1515 spin_lock(&root->fs_info->trans_lock);
1516 if (cur_trans->list.prev != &root->fs_info->trans_list) {
1517 prev_trans = list_entry(cur_trans->list.prev,
1518 struct btrfs_transaction, list);
1519 if (!prev_trans->commit_done) {
1520 atomic_inc(&prev_trans->use_count);
1521 spin_unlock(&root->fs_info->trans_lock);
1522
1523 wait_for_commit(root, prev_trans);
1524
1525 put_transaction(prev_trans);
1526 } else {
1527 spin_unlock(&root->fs_info->trans_lock);
1528 }
1529 } else {
1530 spin_unlock(&root->fs_info->trans_lock);
1531 }
1532
1533 if (!btrfs_test_opt(root, SSD) &&
1534 (now < cur_trans->start_time || now - cur_trans->start_time < 1))
1535 should_grow = 1;
1536
1537 do {
1538 joined = cur_trans->num_joined;
1539
1540 WARN_ON(cur_trans != trans->transaction);
1541
1542 ret = btrfs_flush_all_pending_stuffs(trans, root);
1543 if (ret)
1544 goto cleanup_transaction;
1545
1546 prepare_to_wait(&cur_trans->writer_wait, &wait,
1547 TASK_UNINTERRUPTIBLE);
1548
1549 if (atomic_read(&cur_trans->num_writers) > 1)
1550 schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1551 else if (should_grow)
1552 schedule_timeout(1);
1553
1554 finish_wait(&cur_trans->writer_wait, &wait);
1555 } while (atomic_read(&cur_trans->num_writers) > 1 ||
1556 (should_grow && cur_trans->num_joined != joined));
1557
1558 ret = btrfs_flush_all_pending_stuffs(trans, root);
1559 if (ret)
1560 goto cleanup_transaction;
1561
1562 /*
1563 * Ok now we need to make sure to block out any other joins while we
1564 * commit the transaction. We could have started a join before setting
1565 * no_join so make sure to wait for num_writers to == 1 again.
1566 */
1567 spin_lock(&root->fs_info->trans_lock);
1568 root->fs_info->trans_no_join = 1;
1569 spin_unlock(&root->fs_info->trans_lock);
1570 wait_event(cur_trans->writer_wait,
1571 atomic_read(&cur_trans->num_writers) == 1);
1572
1573 /*
1574 * the reloc mutex makes sure that we stop
1575 * the balancing code from coming in and moving
1576 * extents around in the middle of the commit
1577 */
1578 mutex_lock(&root->fs_info->reloc_mutex);
1579
1580 /*
1581 * We needn't worry about the delayed items because we will
1582 * deal with them in create_pending_snapshot(), which is the
1583 * core function of the snapshot creation.
1584 */
1585 ret = create_pending_snapshots(trans, root->fs_info);
1586 if (ret) {
1587 mutex_unlock(&root->fs_info->reloc_mutex);
1588 goto cleanup_transaction;
1589 }
1590
1591 /*
1592 * We insert the dir indexes of the snapshots and update the inode
1593 * of the snapshots' parents after the snapshot creation, so there
1594 * are some delayed items which are not dealt with. Now deal with
1595 * them.
1596 *
1597 * We needn't worry that this operation will corrupt the snapshots,
1598 	 * because all the trees which are snapshotted will be forced to COW
1599 * the nodes and leaves.
1600 */
1601 ret = btrfs_run_delayed_items(trans, root);
1602 if (ret) {
1603 mutex_unlock(&root->fs_info->reloc_mutex);
1604 goto cleanup_transaction;
1605 }
1606
1607 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1608 if (ret) {
1609 mutex_unlock(&root->fs_info->reloc_mutex);
1610 goto cleanup_transaction;
1611 }
1612
1613 /*
1614 * make sure none of the code above managed to slip in a
1615 * delayed item
1616 */
1617 btrfs_assert_delayed_root_empty(root);
1618
1619 WARN_ON(cur_trans != trans->transaction);
1620
1621 btrfs_scrub_pause(root);
1622 	/* commit_fs_roots and commit_cowonly_roots below are responsible for getting the
1623 * various roots consistent with each other. Every pointer
1624 * in the tree of tree roots has to point to the most up to date
1625 * root for every subvolume and other tree. So, we have to keep
1626 * the tree logging code from jumping in and changing any
1627 * of the trees.
1628 *
1629 * At this point in the commit, there can't be any tree-log
1630 * writers, but a little lower down we drop the trans mutex
1631 * and let new people in. By holding the tree_log_mutex
1632 * from now until after the super is written, we avoid races
1633 * with the tree-log code.
1634 */
1635 mutex_lock(&root->fs_info->tree_log_mutex);
1636
1637 ret = commit_fs_roots(trans, root);
1638 if (ret) {
1639 mutex_unlock(&root->fs_info->tree_log_mutex);
1640 mutex_unlock(&root->fs_info->reloc_mutex);
1641 goto cleanup_transaction;
1642 }
1643
1644 	/* commit_fs_roots gets rid of all the tree log roots; it is now
1645 	 * safe to free the root of the tree log roots
1646 */
1647 btrfs_free_log_root_tree(trans, root->fs_info);
1648
1649 ret = commit_cowonly_roots(trans, root);
1650 if (ret) {
1651 mutex_unlock(&root->fs_info->tree_log_mutex);
1652 mutex_unlock(&root->fs_info->reloc_mutex);
1653 goto cleanup_transaction;
1654 }
1655
1656 btrfs_prepare_extent_commit(trans, root);
1657
1658 cur_trans = root->fs_info->running_transaction;
1659
1660 btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1661 root->fs_info->tree_root->node);
1662 switch_commit_root(root->fs_info->tree_root);
1663
1664 btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
1665 root->fs_info->chunk_root->node);
1666 switch_commit_root(root->fs_info->chunk_root);
1667
1668 assert_qgroups_uptodate(trans);
1669 update_super_roots(root);
1670
1671 if (!root->fs_info->log_root_recovering) {
1672 btrfs_set_super_log_root(root->fs_info->super_copy, 0);
1673 btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
1674 }
1675
1676 memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
1677 sizeof(*root->fs_info->super_copy));
1678
1679 trans->transaction->blocked = 0;
1680 spin_lock(&root->fs_info->trans_lock);
1681 root->fs_info->running_transaction = NULL;
1682 root->fs_info->trans_no_join = 0;
1683 spin_unlock(&root->fs_info->trans_lock);
1684 mutex_unlock(&root->fs_info->reloc_mutex);
1685
1686 wake_up(&root->fs_info->transaction_wait);
1687
1688 ret = btrfs_write_and_wait_transaction(trans, root);
1689 if (ret) {
1690 btrfs_error(root->fs_info, ret,
1691 "Error while writing out transaction.");
1692 mutex_unlock(&root->fs_info->tree_log_mutex);
1693 goto cleanup_transaction;
1694 }
1695
1696 ret = write_ctree_super(trans, root, 0);
1697 if (ret) {
1698 mutex_unlock(&root->fs_info->tree_log_mutex);
1699 goto cleanup_transaction;
1700 }
1701
1702 /*
1703 * the super is written, we can safely allow the tree-loggers
1704 * to go about their business
1705 */
1706 mutex_unlock(&root->fs_info->tree_log_mutex);
1707
1708 btrfs_finish_extent_commit(trans, root);
1709
1710 cur_trans->commit_done = 1;
1711
1712 root->fs_info->last_trans_committed = cur_trans->transid;
1713
1714 wake_up(&cur_trans->commit_wait);
1715
1716 spin_lock(&root->fs_info->trans_lock);
1717 list_del_init(&cur_trans->list);
1718 spin_unlock(&root->fs_info->trans_lock);
1719
1720 put_transaction(cur_trans);
1721 put_transaction(cur_trans);
1722
1723 if (trans->type < TRANS_JOIN_NOLOCK)
1724 sb_end_intwrite(root->fs_info->sb);
1725
1726 trace_btrfs_transaction_commit(root);
1727
1728 btrfs_scrub_continue(root);
1729
1730 if (current->journal_info == trans)
1731 current->journal_info = NULL;
1732
1733 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1734
1735 if (current != root->fs_info->transaction_kthread)
1736 btrfs_run_delayed_iputs(root);
1737
1738 return ret;
1739
1740 cleanup_transaction:
1741 btrfs_trans_release_metadata(trans, root);
1742 trans->block_rsv = NULL;
1743 btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
1744 // WARN_ON(1);
1745 if (current->journal_info == trans)
1746 current->journal_info = NULL;
1747 cleanup_transaction(trans, root, ret);
1748
1749 return ret;
1750 }
1751
1752 /*
1753 * interface function to delete all the snapshots we have scheduled for deletion
1754 */
1755 int btrfs_clean_old_snapshots(struct btrfs_root *root)
1756 {
1757 LIST_HEAD(list);
1758 struct btrfs_fs_info *fs_info = root->fs_info;
1759
1760 spin_lock(&fs_info->trans_lock);
1761 list_splice_init(&fs_info->dead_roots, &list);
1762 spin_unlock(&fs_info->trans_lock);
1763
1764 while (!list_empty(&list)) {
1765 int ret;
1766
1767 root = list_entry(list.next, struct btrfs_root, root_list);
1768 list_del(&root->root_list);
1769
1770 btrfs_kill_all_delayed_nodes(root);
1771
1772 if (btrfs_header_backref_rev(root->node) <
1773 BTRFS_MIXED_BACKREF_REV)
1774 ret = btrfs_drop_snapshot(root, NULL, 0, 0);
1775 else
1776 			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
1777 BUG_ON(ret < 0);
1778 }
1779 return 0;
1780 }