Btrfs: quota tree support and startup
fs/btrfs/transaction.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"

#define BTRFS_ROOT_TRANS_TAG 0

void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(atomic_read(&transaction->use_count) == 0);
	if (atomic_dec_and_test(&transaction->use_count)) {
		BUG_ON(!list_empty(&transaction->list));
		WARN_ON(transaction->delayed_refs.root.rb_node);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

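/*
 * swap in the current root node as the commit root: drop the reference to
 * the old commit root and take a reference on the tree's current node
 */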
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, int nofail)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
loop:
	/* The file system has been taken offline. No new transactions. */
	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&fs_info->trans_lock);
		return -EROFS;
	}

	if (fs_info->trans_no_join) {
		if (!nofail) {
			spin_unlock(&fs_info->trans_lock);
			return -EBUSY;
		}
	}

	cur_trans = fs_info->running_transaction;
	if (cur_trans) {
		if (cur_trans->aborted) {
			spin_unlock(&fs_info->trans_lock);
			return cur_trans->aborted;
		}
		atomic_inc(&cur_trans->use_count);
		atomic_inc(&cur_trans->num_writers);
		cur_trans->num_joined++;
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	spin_unlock(&fs_info->trans_lock);

	cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
	if (!cur_trans)
		return -ENOMEM;

	spin_lock(&fs_info->trans_lock);
	if (fs_info->running_transaction) {
		/*
		 * someone started a transaction after we unlocked. Make sure
		 * to redo the trans_no_join checks above
		 */
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		cur_trans = fs_info->running_transaction;
		goto loop;
	} else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		spin_unlock(&root->fs_info->trans_lock);
		kmem_cache_free(btrfs_transaction_cachep, cur_trans);
		return -EROFS;
	}

	atomic_set(&cur_trans->num_writers, 1);
	cur_trans->num_joined = 0;
	init_waitqueue_head(&cur_trans->writer_wait);
	init_waitqueue_head(&cur_trans->commit_wait);
	cur_trans->in_commit = 0;
	cur_trans->blocked = 0;
	/*
	 * One for this trans handle, one so it will live on until we
	 * commit the transaction.
	 */
	atomic_set(&cur_trans->use_count, 2);
	cur_trans->commit_done = 0;
	cur_trans->start_time = get_seconds();

	cur_trans->delayed_refs.root = RB_ROOT;
	cur_trans->delayed_refs.num_entries = 0;
	cur_trans->delayed_refs.num_heads_ready = 0;
	cur_trans->delayed_refs.num_heads = 0;
	cur_trans->delayed_refs.flushing = 0;
	cur_trans->delayed_refs.run_delayed_start = 0;

	/*
	 * although the tree mod log is per file system and not per transaction,
	 * the log must never go across transaction boundaries.
	 */
	smp_mb();
	if (!list_empty(&fs_info->tree_mod_seq_list)) {
		printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
			"creating a fresh transaction\n");
		WARN_ON(1);
	}
	if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
		printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
			"creating a fresh transaction\n");
		WARN_ON(1);
	}
	atomic_set(&fs_info->tree_mod_seq, 0);

	spin_lock_init(&cur_trans->commit_lock);
	spin_lock_init(&cur_trans->delayed_refs.lock);

	INIT_LIST_HEAD(&cur_trans->pending_snapshots);
	list_add_tail(&cur_trans->list, &fs_info->trans_list);
	extent_io_tree_init(&cur_trans->dirty_pages,
			    fs_info->btree_inode->i_mapping);
	fs_info->generation++;
	cur_trans->transid = fs_info->generation;
	fs_info->running_transaction = cur_trans;
	cur_trans->aborted = 0;
	spin_unlock(&fs_info->trans_lock);

	return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction. This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		/*
		 * see below for in_trans_setup usage rules
		 * we have the reloc mutex held now, so there
		 * is only one writer in this function
		 */
		root->in_trans_setup = 1;

		/* make sure readers find in_trans_setup before
		 * they find our root->last_trans update
		 */
		smp_wmb();

		spin_lock(&root->fs_info->fs_roots_radix_lock);
		if (root->last_trans == trans->transid) {
			spin_unlock(&root->fs_info->fs_roots_radix_lock);
			return 0;
		}
		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		spin_unlock(&root->fs_info->fs_roots_radix_lock);
		root->last_trans = trans->transid;

		/* this is pretty tricky. We don't want to
		 * take the relocation lock in btrfs_record_root_in_trans
		 * unless we're really doing the first setup for this root in
		 * this transaction.
		 *
		 * Normally we'd use root->last_trans as a flag to decide
		 * if we want to take the expensive mutex.
		 *
		 * But, we have to set root->last_trans before we
		 * init the relocation root, otherwise, we trip over warnings
		 * in ctree.c. The solution used here is to flag ourselves
		 * with root->in_trans_setup. When this is 1, we're still
		 * fixing up the reloc trees and everyone must wait.
		 *
		 * When this is zero, they can trust root->last_trans and fly
		 * through btrfs_record_root_in_trans without having to take the
		 * lock. smp_wmb() makes sure that all the writes above are
		 * done before we pop in the zero below
		 */
		btrfs_init_reloc_root(trans, root);
		smp_wmb();
		root->in_trans_setup = 0;
	}
	return 0;
}


int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	/*
	 * see record_root_in_trans for comments about in_trans_setup usage
	 * and barriers
	 */
	smp_rmb();
	if (root->last_trans == trans->transid &&
	    !root->in_trans_setup)
		return 0;

	mutex_lock(&root->fs_info->reloc_mutex);
	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->reloc_mutex);

	return 0;
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	spin_lock(&root->fs_info->trans_lock);
	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		atomic_inc(&cur_trans->use_count);
		spin_unlock(&root->fs_info->trans_lock);

		wait_event(root->fs_info->transaction_wait,
			   !cur_trans->blocked);
		put_transaction(cur_trans);
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
	TRANS_JOIN_NOLOCK,
};

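/*
 * decide whether a transaction starter should wait for the currently
 * blocked transaction to commit: log replay never waits, userspace (ioctl)
 * transactions always do, and TRANS_START waits unless an ioctl-held
 * transaction is open
 */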
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (root->fs_info->log_root_recovering)
		return 0;

	if (type == TRANS_USERSPACE)
		return 1;

	if (type == TRANS_START &&
	    !atomic_read(&root->fs_info->open_ioctl_trans))
		return 1;

	return 0;
}

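/*
 * common helper behind the btrfs_*_transaction() wrappers below: reserve
 * metadata space for TRANS_START callers, reuse the handle stashed in
 * current->journal_info when we are already inside a transaction, and
 * otherwise loop joining (or creating) the running transaction until it
 * stops returning -EBUSY
 */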
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	u64 num_bytes = 0;
	int ret;

	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
		return ERR_PTR(-EROFS);

	if (current->journal_info) {
		WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
		h = current->journal_info;
		h->use_count++;
		h->orig_rsv = h->block_rsv;
		h->block_rsv = NULL;
		goto got_it;
	}

	/*
	 * Do the reservation before we join the transaction so we can do all
	 * the appropriate flushing if need be.
	 */
	if (num_items > 0 && root != root->fs_info->chunk_root) {
		num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
		ret = btrfs_block_rsv_add(root,
					  &root->fs_info->trans_block_rsv,
					  num_bytes);
		if (ret)
			return ERR_PTR(ret);
	}
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	do {
		ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
		if (ret == -EBUSY)
			wait_current_trans(root);
	} while (ret == -EBUSY);

	if (ret < 0) {
		kmem_cache_free(btrfs_trans_handle_cachep, h);
		return ERR_PTR(ret);
	}

	cur_trans = root->fs_info->running_transaction;

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->bytes_reserved = 0;
	h->root = root;
	h->delayed_ref_updates = 0;
	h->use_count = 1;
	h->block_rsv = NULL;
	h->orig_rsv = NULL;
	h->aborted = 0;
	h->delayed_ref_elem.seq = 0;
	INIT_LIST_HEAD(&h->qgroup_ref_list);

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_bytes) {
		trace_btrfs_space_reservation(root->fs_info, "transaction",
					      h->transid, num_bytes, 1);
		h->block_rsv = &root->fs_info->trans_block_rsv;
		h->bytes_reserved = num_bytes;
	}

got_it:
	btrfs_record_root_in_trans(h, root);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
	return start_transaction(root, 0, TRANS_USERSPACE);
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
				     struct btrfs_transaction *commit)
{
	wait_event(commit->commit_wait, commit->commit_done);
}

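/*
 * wait for a commit to finish: with a non-zero transid, wait for that
 * specific transaction (-EINVAL if it is unknown and not yet committed);
 * with transid == 0, wait for the newest transaction that is currently
 * committing, if any
 */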
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
	struct btrfs_transaction *cur_trans = NULL, *t;
	int ret;

	ret = 0;
	if (transid) {
		if (transid <= root->fs_info->last_trans_committed)
			goto out;

		/* find specified transaction */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry(t, &root->fs_info->trans_list, list) {
			if (t->transid == transid) {
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
			if (t->transid > transid)
				break;
		}
		spin_unlock(&root->fs_info->trans_lock);
		ret = -EINVAL;
		if (!cur_trans)
			goto out;  /* bad transid */
	} else {
		/* find newest transaction that is committing | committed */
		spin_lock(&root->fs_info->trans_lock);
		list_for_each_entry_reverse(t, &root->fs_info->trans_list,
					    list) {
			if (t->in_commit) {
				if (t->commit_done)
					break;
				cur_trans = t;
				atomic_inc(&cur_trans->use_count);
				break;
			}
		}
		spin_unlock(&root->fs_info->trans_lock);
		if (!cur_trans)
			goto out;  /* nothing committing|committed */
	}

	wait_for_commit(root, cur_trans);

	put_transaction(cur_trans);
	ret = 0;
out:
	return ret;
}

void btrfs_throttle(struct btrfs_root *root)
{
	if (!atomic_read(&root->fs_info->open_ioctl_trans))
		wait_current_trans(root);
}

static int should_end_transaction(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root)
{
	int ret;

	ret = btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
	return ret ? 1 : 0;
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_block_rsv *rsv = trans->block_rsv;
	int updates;
	int err;

	smp_mb();
	if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
		return 1;

	/*
	 * We need to do this in case we're deleting csums so the global block
	 * rsv gets used instead of the csum block rsv.
	 */
	trans->block_rsv = NULL;

	updates = trans->delayed_ref_updates;
	trans->delayed_ref_updates = 0;
	if (updates) {
		err = btrfs_run_delayed_refs(trans, root, updates);
		if (err) /* Error code will also eval true */
			return err;
	}

	trans->block_rsv = rsv;

	return should_end_transaction(trans, root);
}

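/*
 * common tail for ending a transaction handle: flush a batch of delayed
 * refs, hand a blocked transaction over to the commit path (either by
 * committing here when throttling or by waking the transaction kthread),
 * then drop this handle's writer count and free it
 */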
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, int throttle, int lock)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;
	int err = 0;

	if (--trans->use_count) {
		trans->block_rsv = trans->orig_rsv;
		return 0;
	}

	/*
	 * do the qgroup accounting as early as possible
	 */
	err = btrfs_delayed_refs_qgroup_accounting(trans, info);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	/*
	 * the same root has to be passed to start_transaction and
	 * end_transaction. Subvolume quota depends on this.
	 */
	WARN_ON(trans->root != root);
	while (count < 2) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
	    should_end_transaction(trans, root)) {
		trans->transaction->blocked = 1;
		smp_wmb();
	}

	if (lock && cur_trans->blocked && !cur_trans->in_commit) {
		if (throttle) {
			/*
			 * We may race with somebody else here so end up having
			 * to call end_transaction on ourselves again, so inc
			 * our use_count.
			 */
			trans->use_count++;
			return btrfs_commit_transaction(trans, root);
		} else {
			wake_up_process(info->transaction_kthread);
		}
	}

	WARN_ON(cur_trans != info->running_transaction);
	WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
	atomic_dec(&cur_trans->num_writers);

	smp_mb();
	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	if (throttle)
		btrfs_run_delayed_iputs(root);

	if (trans->aborted ||
	    root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		err = -EIO;
	}
	assert_qgroups_uptodate(trans);

	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);
	return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 1, 1);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root)
{
	int ret;

	ret = __btrfs_end_transaction(trans, root, 0, 0);
	if (ret)
		return ret;
	return 0;
}

int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
				struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      mark)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
				   GFP_NOFS);
		err = filemap_fdatawrite_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit. We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int err = 0;
	int werr = 0;
	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
	u64 start = 0;
	u64 end;

	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
				      EXTENT_NEED_WAIT)) {
		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
		err = filemap_fdatawait_range(mapping, start, end);
		if (err)
			werr = err;
		cond_resched();
		start = end + 1;
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
				struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

	if (ret)
		return ret;
	if (ret2)
		return ret2;
	return 0;
}

int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					   &trans->transaction->dirty_pages,
					   EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			return ret;

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		if (ret)
			return ret;
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	eb = btrfs_lock_root_node(fs_info->tree_root);
	ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
			      0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	if (ret)
		return ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret)
		return ret;

	ret = btrfs_run_dev_stats(trans, root->fs_info);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		ret = update_cowonly_root(trans, root);
		if (ret)
			return ret;
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted. This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	spin_unlock(&root->fs_info->trans_lock);
	return 0;
}

/*
 * update all the fs tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	spin_lock(&fs_info->fs_roots_radix_lock);
	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);
			spin_unlock(&fs_info->fs_roots_radix_lock);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);
			btrfs_orphan_commit_root(trans, root);

			btrfs_save_ino_cache(root, trans);

			/* see comments in should_cow_block() */
			root->force_cow = 0;
			smp_wmb();

			if (root->commit_root != root->node) {
				mutex_lock(&root->fs_commit_mutex);
				switch_commit_root(root);
				btrfs_unpin_free_ino(root);
				mutex_unlock(&root->fs_commit_mutex);

				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			spin_lock(&fs_info->fs_roots_radix_lock);
			if (err)
				break;
		}
	}
	spin_unlock(&fs_info->fs_roots_radix_lock);
	return err;
}

/*
 * defrag a given btree. If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;
	unsigned long nr;

	if (xchg(&root->defrag_running, 1))
		return 0;

	while (1) {
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_defrag_leaves(trans, root, cacheonly);

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit. This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct btrfs_block_rsv *rsv;
	struct inode *parent_inode;
	struct dentry *parent;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 to_reserve = 0;
	u64 index = 0;
	u64 objectid;
	u64 root_flags;

	rsv = trans->block_rsv;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		ret = pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(tree_root, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

	if (to_reserve > 0) {
		ret = btrfs_block_rsv_add_noflush(root, &pending->block_rsv,
						  to_reserve);
		if (ret) {
			pending->error = ret;
			goto fail;
		}
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent = dget_parent(dentry);
	parent_inode = parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret); /* -ENOMEM */
	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode, &key,
				    BTRFS_FT_DIR, index);
	if (ret == -EEXIST) {
		pending->error = -EEXIST;
		dput(parent);
		goto fail;
	} else if (ret) {
		goto abort_trans_dput;
	}

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	if (ret)
		goto abort_trans_dput;

	/*
	 * pull in the delayed directory update
	 * and the delayed inode item
	 * otherwise we corrupt the FS during
	 * snapshot
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) { /* Transaction aborted */
		dput(parent);
		goto fail;
	}

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
	btrfs_check_and_init_root_item(new_root_item);

	root_flags = btrfs_root_flags(new_root_item);
	if (pending->readonly)
		root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
	else
		root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
	btrfs_set_root_flags(new_root_item, root_flags);

	old = btrfs_lock_root_node(root);
	ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
	if (ret) {
		btrfs_tree_unlock(old);
		free_extent_buffer(old);
		goto abort_trans_dput;
	}

	btrfs_set_lock_blocking(old);

	ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
	/* clean up in any case */
	btrfs_tree_unlock(old);
	free_extent_buffer(old);
	if (ret)
		goto abort_trans_dput;

	/* see comments in should_cow_block() */
	root->force_cow = 1;
	smp_wmb();

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	if (ret)
		goto abort_trans_dput;

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 btrfs_ino(parent_inode), index,
				 dentry->d_name.name, dentry->d_name.len);
	dput(parent);
	if (ret)
		goto fail;

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	if (IS_ERR(pending->snap)) {
		ret = PTR_ERR(pending->snap);
		goto abort_trans;
	}

	ret = btrfs_reloc_post_snapshot(trans, pending);
	if (ret)
		goto abort_trans;
	ret = 0;
fail:
	kfree(new_root_item);
	trans->block_rsv = rsv;
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return ret;

abort_trans_dput:
	dput(parent);
abort_trans:
	btrfs_abort_transaction(trans, root, ret);
	goto fail;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;

	list_for_each_entry(pending, head, list)
		create_pending_snapshot(trans, fs_info, pending);
	return 0;
}

static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
	if (btrfs_test_opt(root, SPACE_CACHE))
		super->cache_generation = root_item->generation;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->trans_lock);
	return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->blocked;
	spin_unlock(&info->trans_lock);
	return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
					    struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_blocked_wait, trans->in_commit);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
					 struct btrfs_transaction *trans)
{
	wait_event(root->fs_info->transaction_wait,
		   trans->commit_done || (trans->in_commit && !trans->blocked));
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
	struct btrfs_trans_handle *newtrans;
	struct btrfs_root *root;
	struct delayed_work work;
};

static void do_async_commit(struct work_struct *work)
{
	struct btrfs_async_commit *ac =
		container_of(work, struct btrfs_async_commit, work.work);

	btrfs_commit_transaction(ac->newtrans, ac->root);
	kfree(ac);
}

int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_DELAYED_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);
	schedule_delayed_work(&ac->work, 0);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	put_transaction(cur_trans);
	return 0;
}


static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		root->fs_info->running_transaction = NULL;
		root->fs_info->trans_no_join = 0;
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

/*
 * btrfs_transaction state sequence:
 *  in_commit = 0, blocked = 0 (initial)
 *  in_commit = 1, blocked = 1
 *  blocked = 0
 *  commit_done = 1
 */
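/*
 * the heavy commit path: flush delayed refs and delayed items, block new
 * joiners and wait for the remaining writers, create pending snapshots,
 * commit the fs and cowonly roots, then write the dirty btree pages and
 * the super blocks before unblocking waiters
 */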
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret = -EIO;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;

	if (cur_trans->aborted)
		goto cleanup_transaction;

	/* make a pass through all the delayed refs we have so far
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		goto cleanup_transaction;

	spin_lock(&cur_trans->commit_lock);
	if (cur_trans->in_commit) {
		spin_unlock(&cur_trans->commit_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		put_transaction(cur_trans);

		return ret;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	spin_unlock(&cur_trans->commit_lock);
	wake_up(&root->fs_info->transaction_blocked_wait);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;

		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			btrfs_wait_ordered_extents(root, 0, 1);
		}

		ret = btrfs_run_delayed_items(trans, root);
		if (ret)
			goto cleanup_transaction;

		/*
		 * running the delayed items may have added new refs. account
		 * them now so that they hinder processing of more delayed refs
		 * as little as possible.
		 */
		btrfs_delayed_refs_qgroup_accounting(trans, root->fs_info);

		/*
		 * rename doesn't use btrfs_join_transaction, so, once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations. We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (atomic_read(&cur_trans->num_writers) > 1)
			schedule_timeout(MAX_SCHEDULE_TIMEOUT);
		else if (should_grow)
			schedule_timeout(1);

		finish_wait(&cur_trans->writer_wait, &wait);
	} while (atomic_read(&cur_trans->num_writers) > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	/*
	 * Ok now we need to make sure to block out any other joins while we
	 * commit the transaction. We could have started a join before setting
	 * no_join so make sure to wait for num_writers to == 1 again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	btrfs_scrub_pause(root);
	/* btrfs_commit_tree_roots is responsible for getting the
	 * various roots consistent with each other. Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree. So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in. By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto cleanup_transaction;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	}

	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	trans->transaction->blocked = 0;
	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->running_transaction = NULL;
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction.");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto cleanup_transaction;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	put_transaction(cur_trans);
	put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

cleanup_transaction:
	btrfs_printk(root->fs_info, "Skipping commit of aborted transaction.\n");
//	WARN_ON(1);
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	list_splice_init(&fs_info->dead_roots, &list);
	spin_unlock(&fs_info->trans_lock);

	while (!list_empty(&list)) {
		int ret;

		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		btrfs_kill_all_delayed_nodes(root);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			ret = btrfs_drop_snapshot(root, NULL, 0, 0);
		else
			ret = btrfs_drop_snapshot(root, NULL, 1, 0);
		BUG_ON(ret < 0);
	}
	return 0;
}