/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
#include "inode-map.h"
#include "check-integrity.h"
#include "rcu-string.h"
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                                   int read_only);
static void btrfs_destroy_ordered_operations(struct btrfs_root *root);
static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                                      struct btrfs_root *root);
static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t);
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root);
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
                                        struct extent_io_tree *dirty_pages,
                                        int mark);
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
                                       struct extent_io_tree *pinned_extents);
/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};

/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
        extent_submit_bio_hook_t *submit_bio_done;
        int rw;
        int mirror_num;
        unsigned long bio_flags;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
        int error;
};
/*
 * Lockdep class keys for extent_buffer->lock's in this root.  For a given
 * eb, the lockdep key is determined by the btrfs_root it belongs to and
 * the level the eb occupies in the tree.
 *
 * Different roots are used for different purposes and may nest inside each
 * other and they require separate keysets.  As lockdep keys should be
 * static, assign keysets according to the purpose of the root as indicated
 * by btrfs_root->objectid.  This ensures that all special purpose roots
 * have separate keysets.
 *
 * Lock-nesting across peer nodes is always done with the immediate parent
 * node locked thus preventing deadlock.  As lockdep doesn't know this, use
 * subclass to avoid triggering lockdep warning in such cases.
 *
 * The key is set by the readpage_end_io_hook after the buffer has passed
 * csum validation but before the pages are unlocked.  It is also set by
 * btrfs_init_new_buffer on freshly allocated blocks.
 *
 * We also add a check to make sure the highest level of the tree is the
 * same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this code
 * needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif
static struct btrfs_lockdep_keyset {
        u64                     id;             /* root objectid */
        const char              *name_stem;     /* lock name stem */
        char                    names[BTRFS_MAX_LEVEL + 1][20];
        struct lock_class_key   keys[BTRFS_MAX_LEVEL + 1];
} btrfs_lockdep_keysets[] = {
        { .id = BTRFS_ROOT_TREE_OBJECTID,       .name_stem = "root"   },
        { .id = BTRFS_EXTENT_TREE_OBJECTID,     .name_stem = "extent" },
        { .id = BTRFS_CHUNK_TREE_OBJECTID,      .name_stem = "chunk"  },
        { .id = BTRFS_DEV_TREE_OBJECTID,        .name_stem = "dev"    },
        { .id = BTRFS_FS_TREE_OBJECTID,         .name_stem = "fs"     },
        { .id = BTRFS_CSUM_TREE_OBJECTID,       .name_stem = "csum"   },
        { .id = BTRFS_ORPHAN_OBJECTID,          .name_stem = "orphan" },
        { .id = BTRFS_TREE_LOG_OBJECTID,        .name_stem = "log"    },
        { .id = BTRFS_TREE_RELOC_OBJECTID,      .name_stem = "treloc" },
        { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" },
        { .id = 0,                              .name_stem = "tree"   },
};
void __init btrfs_init_lockdep(void)
{
        int i, j;

        /* initialize lockdep class names */
        for (i = 0; i < ARRAY_SIZE(btrfs_lockdep_keysets); i++) {
                struct btrfs_lockdep_keyset *ks = &btrfs_lockdep_keysets[i];

                for (j = 0; j < ARRAY_SIZE(ks->names); j++)
                        snprintf(ks->names[j], sizeof(ks->names[j]),
                                 "btrfs-%s-%02d", ks->name_stem, j);
        }
}
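
/*
 * The format string above yields one class name per level, e.g. the root
 * tree gets "btrfs-root-00" through "btrfs-root-08" (indices 0 through
 * BTRFS_MAX_LEVEL), so lockdep reports identify both the tree and the
 * level of the extent_buffer lock involved.
 */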
void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb,
                                    int level)
{
        struct btrfs_lockdep_keyset *ks;

        BUG_ON(level >= ARRAY_SIZE(ks->keys));

        /* find the matching keyset, id 0 is the default entry */
        for (ks = btrfs_lockdep_keysets; ks->id; ks++)
                if (ks->id == objectid)
                        break;

        lockdep_set_class_and_name(&eb->lock,
                                   &ks->keys[level], ks->names[level]);
}

#endif
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
                struct page *page, size_t pg_offset, u64 start, u64 len,
                int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map();
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        if (ret == -EEXIST) {
                u64 failed_start = em->start;
                u64 failed_len = em->len;

                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (em) {
                        ret = 0;
                } else {
                        em = lookup_extent_mapping(em_tree, failed_start,
                                                   failed_len);
                        ret = -EIO;
                }
        } else if (ret) {
                free_extent_map(em);
                em = NULL;
        }
        write_unlock(&em_tree->lock);

        if (ret)
                em = ERR_PTR(ret);
out:
        return em;
}
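
/*
 * In effect the btree inode carries a single extent map covering
 * [0, (u64)-1): every metadata lookup resolves to the same mapping on
 * latest_bdev, and the -EEXIST handling above simply re-uses whichever
 * instance won the race to insert it.
 */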
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
        return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        put_unaligned_le32(~crc, result);
}
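
/*
 * The stored checksum is the bit-inverted crc32c, written little-endian:
 * e.g. a raw crc32c of 0x12345678 lands on disk as ~0x12345678 =
 * 0xedcba987, in byte order 87 a9 cb ed.
 */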
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;
        unsigned long inline_result;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                        &kaddr, &map_start, &map_len);
                if (err)
                        return 1;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(root, kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
        }
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
                if (!result)
                        return 1;
        } else {
                result = (char *)&inline_result;
        }

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        printk_ratelimited(KERN_INFO "btrfs: %s checksum verify "
                                       "failed on %llu wanted %X found %X "
                                       "level %d\n",
                                       root->fs_info->sb->s_id,
                                       (unsigned long long)buf->start, val, found,
                                       btrfs_header_level(buf));
                        if (result != (char *)&inline_result)
                                kfree(result);
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }
        if (result != (char *)&inline_result)
                kfree(result);
        return 0;
}
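
/*
 * Layout note: a tree block starts with BTRFS_CSUM_SIZE bytes reserved for
 * the checksum itself, which is why the loop above hashes from offset
 * BTRFS_CSUM_SIZE to the end of the buffer and the result is written back
 * at offset 0.
 */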
/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid,
                                 int atomic)
{
        struct extent_state *cached_state = NULL;
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        if (atomic)
                return -EAGAIN;

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         0, &cached_state);
        if (extent_buffer_uptodate(eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk_ratelimited("parent transid verify failed on %llu wanted %llu "
                       "found %llu\n",
                       (unsigned long long)eb->start,
                       (unsigned long long)parent_transid,
                       (unsigned long long)btrfs_header_generation(eb));
        ret = 1;
        clear_extent_buffer_uptodate(eb);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
        return ret;
}
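
/*
 * Example of what this catches: if the parent was committed pointing at
 * generation 100 but this block still carries generation 97, the write
 * for generation 100 either never reached the disk or landed elsewhere,
 * so the block must not be trusted even though its checksum is valid.
 */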
/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int failed = 0;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;
        int failed_mirror = 0;

        clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start,
                                               WAIT_COMPLETE,
                                               btree_get_extent, mirror_num);
                if (!ret && !verify_parent_transid(io_tree, eb,
                                                   parent_transid, 0))
                        break;

                /*
                 * This buffer's crc is fine, but its contents are corrupted, so
                 * there is no reason to read the other copies, they won't be
                 * any less wrong.
                 */
                if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags))
                        break;

                num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        break;

                if (!failed_mirror) {
                        failed = 1;
                        failed_mirror = eb->read_mirror;
                }

                mirror_num++;
                if (mirror_num == failed_mirror)
                        mirror_num++;

                if (mirror_num > num_copies)
                        break;
        }

        if (failed && !ret && failed_mirror)
                repair_eb_io_failure(root, eb, failed_mirror);

        return ret;
}
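
/*
 * Retry order: mirror_num 0 asks for the default copy; after a failure
 * the loop walks mirrors 1..num_copies, skipping the mirror recorded in
 * failed_mirror, and gives up once every copy has been tried.
 */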
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 found_start;
        struct extent_buffer *eb;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        eb = (struct extent_buffer *)page->private;
        if (page != eb->pages[0])
                return 0;
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                WARN_ON(1);
                return 0;
        }
        if (eb->pages[0] != page) {
                WARN_ON(1);
                return 0;
        }
        if (!PageUptodate(page)) {
                WARN_ON(1);
                return 0;
        }
        csum_tree_block(root, eb, 0);
        return 0;
}
static int check_tree_block_fsid(struct btrfs_root *root,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        u8 fsid[BTRFS_UUID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
                           BTRFS_FSID_SIZE);
        while (fs_devices) {
                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}
#define CORRUPT(reason, eb, root, slot)                         \
        printk(KERN_CRIT "btrfs: corrupt leaf, %s: block=%llu," \
               "root=%llu, slot=%d\n", reason,                  \
               (unsigned long long)btrfs_header_bytenr(eb),     \
               (unsigned long long)root->objectid, slot)
static noinline int check_leaf(struct btrfs_root *root,
                               struct extent_buffer *leaf)
{
        struct btrfs_key key;
        struct btrfs_key leaf_key;
        u32 nritems = btrfs_header_nritems(leaf);
        int slot;

        if (nritems == 0)
                return 0;

        /* Check the 0 item */
        if (btrfs_item_offset_nr(leaf, 0) + btrfs_item_size_nr(leaf, 0) !=
            BTRFS_LEAF_DATA_SIZE(root)) {
                CORRUPT("invalid item offset size pair", leaf, root, 0);
                return -EIO;
        }

        /*
         * Check to make sure each item's keys are in the correct order and
         * their offsets make sense.  We only have to loop through nritems-1
         * because we check the current slot against the next slot, which
         * verifies the next slot's offset+size makes sense and that the
         * current slot's offset is correct.
         */
        for (slot = 0; slot < nritems - 1; slot++) {
                btrfs_item_key_to_cpu(leaf, &leaf_key, slot);
                btrfs_item_key_to_cpu(leaf, &key, slot + 1);

                /* Make sure the keys are in the right order */
                if (btrfs_comp_cpu_keys(&leaf_key, &key) >= 0) {
                        CORRUPT("bad key order", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Make sure the offset and ends are right, remember that the
                 * item data starts at the end of the leaf and grows towards
                 * the front.
                 */
                if (btrfs_item_offset_nr(leaf, slot) !=
                        btrfs_item_end_nr(leaf, slot + 1)) {
                        CORRUPT("slot offset bad", leaf, root, slot);
                        return -EIO;
                }

                /*
                 * Check to make sure that we don't point outside of the leaf,
                 * just in case all the items are consistent with each other,
                 * but all point outside of the leaf.
                 */
                if (btrfs_item_end_nr(leaf, slot) >
                    BTRFS_LEAF_DATA_SIZE(root)) {
                        CORRUPT("slot end outside of leaf", leaf, root, slot);
                        return -EIO;
                }
        }

        return 0;
}
struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree,
                                       struct page *page, int max_walk)
{
        struct extent_buffer *eb;
        u64 start = page_offset(page);
        u64 target = start;
        u64 min_start;

        if (start < max_walk)
                min_start = 0;
        else
                min_start = start - max_walk;

        while (start >= min_start) {
                eb = find_extent_buffer(tree, start, 0);
                if (eb) {
                        /*
                         * we found an extent buffer and it contains our page
                         */
                        if (eb->start <= target &&
                            eb->start + eb->len > target)
                                return eb;

                        /* we found an extent buffer that wasn't for us */
                        free_extent_buffer(eb);
                        return NULL;
                }
                if (start == 0)
                        break;
                start -= PAGE_CACHE_SIZE;
        }
        return NULL;
}
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                                      struct extent_state *state, int mirror)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;
        int reads_done;

        if (!page->private)
                goto out;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        eb = (struct extent_buffer *)page->private;

        /* the pending IO might have been the only thing that kept this buffer
         * in memory.  Make sure we have a ref for all this other checks
         */
        extent_buffer_get(eb);

        reads_done = atomic_dec_and_test(&eb->io_pages);
        if (!reads_done)
                goto err;

        eb->read_mirror = mirror;
        if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
                ret = -EIO;
                goto err;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
                printk_ratelimited(KERN_INFO "btrfs bad tree block start "
                               "%llu %llu\n",
                               (unsigned long long)found_start,
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
                printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n",
                               (unsigned long long)eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);

        btrfs_set_buffer_lockdep_class(btrfs_header_owner(eb),
                                       eb, found_level);

        ret = csum_tree_block(root, eb, 1);
        if (ret) {
                ret = -EIO;
                goto err;
        }

        /*
         * If this is a leaf block and it is corrupt, set the corrupt bit so
         * that we don't try and read the other copies of this block, just
         * return -EIO.
         */
        if (found_level == 0 && check_leaf(root, eb)) {
                set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
                ret = -EIO;
        }

        if (!ret)
                set_extent_buffer_uptodate(eb);
err:
        if (test_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) {
                clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags);
                btree_readahead_hook(root, eb, eb->start, ret);
        }

        if (ret)
                clear_extent_buffer_uptodate(eb);
        free_extent_buffer(eb);
out:
        return ret;
}
static int btree_io_failed_hook(struct page *page, int failed_mirror)
{
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

        eb = (struct extent_buffer *)page->private;
        set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
        eb->read_mirror = failed_mirror;
        if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags))
                btree_readahead_hook(root, eb, eb->start, -EIO);
        return -EIO;    /* we fixed nothing */
}
static void end_workqueue_bio(struct bio *bio, int err)
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;

        if (bio->bi_rw & REQ_WRITE) {
                if (end_io_wq->metadata == 1)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata == 2)
                        btrfs_queue_worker(&fs_info->endio_freespace_worker,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_write_workers,
                                           &end_io_wq->work);
        } else {
                if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_workers,
                                           &end_io_wq->work);
        }
}
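
/*
 * Routing summary: metadata writes go to endio_meta_write_workers, free
 * space cache writes (metadata == 2) to endio_freespace_worker, other
 * writes to endio_write_workers; metadata reads go to endio_meta_workers
 * and plain data reads to endio_workers.
 */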
/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;
        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
        unsigned long limit = min_t(unsigned long,
                                    info->workers.max_workers,
                                    info->fs_devices->open_devices);
        return 256 * limit;
}
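
/*
 * This limit acts as a soft throttle on in-flight async csum work:
 * run_one_async_done() below wakes async_submit_wait once the number of
 * pending async submits drops under two thirds of it.
 */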
static void run_one_async_start(struct btrfs_work *work)
{
        struct async_submit_bio *async;
        int ret;

        async = container_of(work, struct async_submit_bio, work);
        ret = async->submit_bio_start(async->inode, async->rw, async->bio,
                                      async->mirror_num, async->bio_flags,
                                      async->bio_offset);
        if (ret)
                async->error = ret;
}

static void run_one_async_done(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
        int limit;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;

        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        atomic_dec(&fs_info->nr_async_submits);

        if (atomic_read(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        /* If an error occurred we just want to clean up the bio and move on */
        if (async->error) {
                bio_endio(async->bio, async->error);
                return;
        }

        async->submit_bio_done(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags,
                               async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        unsigned long bio_flags,
                        u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;

        async->work.func = run_one_async_start;
        async->work.ordered_func = run_one_async_done;
        async->work.ordered_free = run_one_async_free;

        async->work.flags = 0;
        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;

        async->error = 0;

        atomic_inc(&fs_info->nr_async_submits);

        if (rw & REQ_SYNC)
                btrfs_set_work_high_prio(&async->work);

        btrfs_queue_worker(&fs_info->workers, &async->work);

        while (atomic_read(&fs_info->async_submit_draining) &&
              atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) == 0));
        }

        return 0;
}
static int btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        struct btrfs_root *root;
        int ret = 0;

        WARN_ON(bio->bi_vcnt <= 0);
        while (bio_index < bio->bi_vcnt) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                ret = csum_dirty_buffer(root, bvec->bv_page);
                if (ret)
                        break;
                bio_index++;
                bvec++;
        }
        return ret;
}
static int __btree_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags,
                                    u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        return btree_csum_one_bio(bio);
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int ret;

        if (!(rw & REQ_WRITE)) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
                                          bio, 1);
                if (ret)
                        return ret;
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 0);
        }

        /*
         * kthread helpers are used to submit writes so that checksumming
         * can happen in parallel across all CPUs
         */
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num, 0,
                                   bio_offset,
                                   __btree_submit_bio_start,
                                   __btree_submit_bio_done);
}
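
/*
 * Net effect: metadata reads take the end_io workqueue path so checksum
 * verification happens in a helper thread, while metadata writes are
 * checksummed in the async submit path before btrfs_map_bio() sends them
 * down.
 */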
#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
                        struct page *newpage, struct page *page,
                        enum migrate_mode mode)
{
        /*
         * we can't safely write a btree page from here,
         * we haven't done the locking hook
         */
        if (PageDirty(page))
                return -EAGAIN;
        /*
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
        if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
        return migrate_page(mapping, newpage, page, mode);
}
#endif
static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
                struct btrfs_root *root = BTRFS_I(mapping->host)->root;
                u64 num_dirty;
                unsigned long thresh = 32 * 1024 * 1024;

                if (wbc->for_kupdate)
                        return 0;

                /* this is a bit racy, but that's ok */
                num_dirty = root->fs_info->dirty_metadata_bytes;
                if (num_dirty < thresh)
                        return 0;
        }
        return btree_write_cache_pages(mapping, wbc);
}
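
/*
 * For background (WB_SYNC_NONE) writeback this is deliberately lazy:
 * nothing is written until roughly 32MB of dirty metadata has built up,
 * which batches btree writes into larger, better-ordered IO.
 */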
static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent, 0);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        if (PageWriteback(page) || PageDirty(page))
                return 0;
        /*
         * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
         * slab allocation from alloc_extent_state down the callchain where
         * it'd hit a BUG_ON as those flags are not allowed.
         */
        gfp_flags &= ~GFP_SLAB_BUG_MASK;

        return try_release_extent_buffer(page, gfp_flags);
}
static void btree_invalidatepage(struct page *page, unsigned long offset)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                printk(KERN_WARNING "btrfs warning page private not zero "
                       "on page %llu\n", (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}
static int btree_set_page_dirty(struct page *page)
{
        struct extent_buffer *eb;

        BUG_ON(!PagePrivate(page));
        eb = (struct extent_buffer *)page->private;
        BUG_ON(!eb);
        BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
        BUG_ON(!atomic_read(&eb->refs));
        btrfs_assert_tree_locked(eb);
        return __set_page_dirty_nobuffers(page);
}
static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
#ifdef CONFIG_MIGRATION
        .migratepage    = btree_migratepage,
#endif
        .set_page_dirty = btree_set_page_dirty,
};
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}

int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         int mirror_num, struct extent_buffer **eb)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;

        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

        ret = read_extent_buffer_pages(io_tree, buf, 0, WAIT_PAGE_LOCK,
                                       btree_get_extent, mirror_num);
        if (ret) {
                free_extent_buffer(buf);
                return ret;
        }

        if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
                free_extent_buffer(buf);
                return -EIO;
        } else if (extent_buffer_uptodate(buf)) {
                *eb = buf;
        } else {
                free_extent_buffer(buf);
        }
        return 0;
}
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize);
        return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize);
        return eb;
}

int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
                                        buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        return filemap_fdatawait_range(buf->pages[0]->mapping,
                                       buf->start, buf->start + buf->len - 1);
}
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        int ret;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        return buf;
}
void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                      struct extent_buffer *buf)
{
        if (btrfs_header_generation(buf) ==
            root->fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        spin_lock(&root->fs_info->delalloc_lock);
                        if (root->fs_info->dirty_metadata_bytes >= buf->len)
                                root->fs_info->dirty_metadata_bytes -= buf->len;
                        else {
                                spin_unlock(&root->fs_info->delalloc_lock);
                                btrfs_panic(root->fs_info, -EOVERFLOW,
                                          "Can't clear %lu bytes from "
                                          " dirty_metadata_bytes (%lu)",
                                          buf->len,
                                          root->fs_info->dirty_metadata_bytes);
                        }
                        spin_unlock(&root->fs_info->delalloc_lock);
                }

                /* ugh, clear_extent_buffer_dirty needs to lock the page */
                btrfs_set_lock_blocking(buf);
                clear_extent_buffer_dirty(buf);
        }
}
static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                         u32 stripesize, struct btrfs_root *root,
                         struct btrfs_fs_info *fs_info,
                         u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;
        root->in_radix = 0;
        root->orphan_item_inserted = 0;
        root->orphan_cleanup_state = 0;

        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->name = NULL;
        root->inode_tree = RB_ROOT;
        INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->root_list);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->accounting_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        atomic_set(&root->orphan_inodes, 0);
        root->log_batch = 0;
        root->log_transid = 0;
        root->last_log_commit = 0;
        extent_io_tree_init(&root->dirty_log_pages,
                            fs_info->btree_inode->i_mapping);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->root_key.objectid = objectid;
        root->anon_dev = 0;
}
static int __must_check find_and_setup_root(struct btrfs_root *tree_root,
                                            struct btrfs_fs_info *fs_info,
                                            u64 objectid,
                                            struct btrfs_root *root)
{
        int ret;
        u32 blocksize;
        u64 generation;

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        if (ret > 0)
                return -ENOENT;
        else if (ret < 0)
                return ret;

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->commit_root = NULL;
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) {
                free_extent_buffer(root->node);
                root->node = NULL;
                return -EIO;
        }
        root->commit_root = btrfs_root_node(root);
        return 0;
}
static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
        if (root)
                root->fs_info = fs_info;
        return root;
}
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
        /*
         * log trees do not get reference counted because they go away
         * before a real commit is actually done.  They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */
        root->ref_cows = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                      BTRFS_TREE_LOG_OBJECTID, NULL,
                                      0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
        root->node = leaf;

        write_extent_buffer(root->node, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(root->node),
                            BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, root->fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        inode_item->generation = cpu_to_le64(1);
        inode_item->size = cpu_to_le64(3);
        inode_item->nlink = cpu_to_le32(1);
        inode_item->nbytes = cpu_to_le64(root->leafsize);
        inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->last_log_commit = 0;
        return 0;
}
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *l;
        u64 generation;
        u32 blocksize;
        int ret = 0;

        root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto out;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, location->objectid);

        path = btrfs_alloc_path();
        if (!path) {
                kfree(root);
                return ERR_PTR(-ENOMEM);
        }
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret == 0) {
                l = path->nodes[0];
                read_extent_buffer(l, &root->root_item,
                                   btrfs_item_ptr_offset(l, path->slots[0]),
                                   sizeof(root->root_item));
                memcpy(&root->root_key, location, sizeof(*location));
        }
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                if (ret > 0)
                        ret = -ENOENT;
                return ERR_PTR(ret);
        }

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        root->commit_root = btrfs_root_node(root);
        BUG_ON(!root->node); /* -ENOMEM */
out:
        if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
                root->ref_cows = 1;
                btrfs_check_and_init_root_item(&root->root_item);
        }

        return root;
}
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location)
{
        struct btrfs_root *root;
        int ret;

        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
                return fs_info->chunk_root;
        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
                return fs_info->dev_root;
        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
                return fs_info->csum_root;
again:
        spin_lock(&fs_info->fs_roots_radix_lock);
        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        spin_unlock(&fs_info->fs_roots_radix_lock);
        if (root)
                return root;

        root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
        if (IS_ERR(root))
                return root;

        root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
        root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
                                        GFP_NOFS);
        if (!root->free_ino_pinned || !root->free_ino_ctl) {
                ret = -ENOMEM;
                goto fail;
        }

        btrfs_init_free_ino_ctl(root);
        mutex_init(&root->fs_commit_mutex);
        spin_lock_init(&root->cache_lock);
        init_waitqueue_head(&root->cache_wait);

        ret = get_anon_bdev(&root->anon_dev);
        if (ret)
                goto fail;

        if (btrfs_root_refs(&root->root_item) == 0) {
                ret = -ENOENT;
                goto fail;
        }

        ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
        if (ret < 0)
                goto fail;
        if (ret == 0)
                root->orphan_item_inserted = 1;

        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (ret)
                goto fail;

        spin_lock(&fs_info->fs_roots_radix_lock);
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret == 0)
                root->in_radix = 1;

        spin_unlock(&fs_info->fs_roots_radix_lock);
        radix_tree_preload_end();

        if (ret == -EEXIST) {
                free_fs_root(root);
                goto again;
        }
        if (ret)
                goto fail;

        ret = btrfs_find_dead_roots(fs_info->tree_root,
                                    root->root_key.objectid);
        WARN_ON(ret);
        return root;
fail:
        free_fs_root(root);
        return ERR_PTR(ret);
}
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;

        rcu_read_lock();
        list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi && bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
        }
        rcu_read_unlock();
        return ret;
}
/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
        int err;

        bdi->capabilities = BDI_CAP_MAP_COPY;
        err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
        if (err)
                return err;

        bdi->ra_pages = default_backing_dev_info.ra_pages;
        bdi->congested_fn = btrfs_congested_fn;
        bdi->congested_data = info;
        return 0;
}
/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
        struct bio *bio;
        struct end_io_wq *end_io_wq;
        struct btrfs_fs_info *fs_info;
        int error;

        end_io_wq = container_of(work, struct end_io_wq, work);
        bio = end_io_wq->bio;
        fs_info = end_io_wq->info;

        error = end_io_wq->error;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
        bio_endio(bio, error);
}
static int cleaner_kthread(void *arg)
{
        struct btrfs_root *root = arg;

        do {
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);

                if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
                    mutex_trylock(&root->fs_info->cleaner_mutex)) {
                        btrfs_run_delayed_iputs(root);
                        btrfs_clean_old_snapshots(root);
                        mutex_unlock(&root->fs_info->cleaner_mutex);
                        btrfs_run_defrag_inodes(root->fs_info);
                }

                if (!try_to_freeze()) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop())
                                schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
static int transaction_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_trans_handle *trans;
        struct btrfs_transaction *cur;
        u64 transid;
        unsigned long now;
        unsigned long delay;
        bool cannot_commit;

        do {
                cannot_commit = false;
                delay = HZ * 30;
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->transaction_kthread_mutex);

                spin_lock(&root->fs_info->trans_lock);
                cur = root->fs_info->running_transaction;
                if (!cur) {
                        spin_unlock(&root->fs_info->trans_lock);
                        goto sleep;
                }

                now = get_seconds();
                if (!cur->blocked &&
                    (now < cur->start_time || now - cur->start_time < 30)) {
                        spin_unlock(&root->fs_info->trans_lock);
                        delay = HZ * 5;
                        goto sleep;
                }
                transid = cur->transid;
                spin_unlock(&root->fs_info->trans_lock);

                /* If the file system is aborted, this will always fail. */
                trans = btrfs_join_transaction(root);
                if (IS_ERR(trans)) {
                        cannot_commit = true;
                        goto sleep;
                }
                if (transid == trans->transid) {
                        btrfs_commit_transaction(trans, root);
                } else {
                        btrfs_end_transaction(trans, root);
                }
sleep:
                wake_up_process(root->fs_info->cleaner_kthread);
                mutex_unlock(&root->fs_info->transaction_kthread_mutex);

                if (!try_to_freeze()) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop() &&
                            (!btrfs_transaction_blocked(root->fs_info) ||
                             cannot_commit))
                                schedule_timeout(delay);
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
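
/*
 * In short: this thread checks the running transaction on every wakeup
 * and commits it via btrfs_join_transaction() once it is at least 30
 * seconds old or has been marked blocked; younger transactions are left
 * alone so unrelated work can batch into the same commit.
 */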
/*
 * this will find the highest generation in the array of
 * root backups.  The index of the highest array is returned,
 * or -1 if we can't find anything.
 *
 * We check to make sure the array is valid by comparing the
 * generation of the latest root in the array with the generation
 * in the super block.  If they don't match we pitch it.
 */
static int find_newest_super_backup(struct btrfs_fs_info *info, u64 newest_gen)
{
        u64 cur;
        int newest_index = -1;
        struct btrfs_root_backup *root_backup;
        int i;

        for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) {
                root_backup = info->super_copy->super_roots + i;
                cur = btrfs_backup_tree_root_gen(root_backup);
                if (cur == newest_gen)
                        newest_index = i;
        }

        /* check to see if we actually wrapped around */
        if (newest_index == BTRFS_NUM_BACKUP_ROOTS - 1) {
                root_backup = info->super_copy->super_roots;
                cur = btrfs_backup_tree_root_gen(root_backup);
                if (cur == newest_gen)
                        newest_index = 0;
        }
        return newest_index;
}

/*
 * find the oldest backup so we know where to store new entries
 * in the backup array.  This will set the backup_root_index
 * field in the fs_info struct
 */
static void find_oldest_super_backup(struct btrfs_fs_info *info,
                                     u64 newest_gen)
{
        int newest_index = -1;

        newest_index = find_newest_super_backup(info, newest_gen);
        /* if there was garbage in there, just move along */
        if (newest_index == -1) {
                info->backup_root_index = 0;
        } else {
                info->backup_root_index = (newest_index + 1) % BTRFS_NUM_BACKUP_ROOTS;
        }
}
/*
 * copy all the root pointers into the super backup array.
 * this will bump the backup pointer by one when it is
 * done
 */
static void backup_super_roots(struct btrfs_fs_info *info)
{
        int next_backup;
        struct btrfs_root_backup *root_backup;
        int last_backup;

        next_backup = info->backup_root_index;
        last_backup = (next_backup + BTRFS_NUM_BACKUP_ROOTS - 1) %
                BTRFS_NUM_BACKUP_ROOTS;

        /*
         * just overwrite the last backup if we're at the same generation
         * this happens only at umount
         */
        root_backup = info->super_for_commit->super_roots + last_backup;
        if (btrfs_backup_tree_root_gen(root_backup) ==
            btrfs_header_generation(info->tree_root->node))
                next_backup = last_backup;

        root_backup = info->super_for_commit->super_roots + next_backup;

        /*
         * make sure all of our padding and empty slots get zero filled
         * regardless of which ones we use today
         */
        memset(root_backup, 0, sizeof(*root_backup));

        info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS;

        btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start);
        btrfs_set_backup_tree_root_gen(root_backup,
                        btrfs_header_generation(info->tree_root->node));

        btrfs_set_backup_tree_root_level(root_backup,
                        btrfs_header_level(info->tree_root->node));

        btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start);
        btrfs_set_backup_chunk_root_gen(root_backup,
                        btrfs_header_generation(info->chunk_root->node));
        btrfs_set_backup_chunk_root_level(root_backup,
                        btrfs_header_level(info->chunk_root->node));

        btrfs_set_backup_extent_root(root_backup, info->extent_root->node->start);
        btrfs_set_backup_extent_root_gen(root_backup,
                        btrfs_header_generation(info->extent_root->node));
        btrfs_set_backup_extent_root_level(root_backup,
                        btrfs_header_level(info->extent_root->node));

        /*
         * we might commit during log recovery, which happens before we set
         * the fs_root.  Make sure it is valid before we fill it in.
         */
        if (info->fs_root && info->fs_root->node) {
                btrfs_set_backup_fs_root(root_backup,
                                         info->fs_root->node->start);
                btrfs_set_backup_fs_root_gen(root_backup,
                        btrfs_header_generation(info->fs_root->node));
                btrfs_set_backup_fs_root_level(root_backup,
                        btrfs_header_level(info->fs_root->node));
        }

        btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start);
        btrfs_set_backup_dev_root_gen(root_backup,
                        btrfs_header_generation(info->dev_root->node));
        btrfs_set_backup_dev_root_level(root_backup,
                        btrfs_header_level(info->dev_root->node));

        btrfs_set_backup_csum_root(root_backup, info->csum_root->node->start);
        btrfs_set_backup_csum_root_gen(root_backup,
                        btrfs_header_generation(info->csum_root->node));
        btrfs_set_backup_csum_root_level(root_backup,
                        btrfs_header_level(info->csum_root->node));

        btrfs_set_backup_total_bytes(root_backup,
                        btrfs_super_total_bytes(info->super_copy));
        btrfs_set_backup_bytes_used(root_backup,
                        btrfs_super_bytes_used(info->super_copy));
        btrfs_set_backup_num_devices(root_backup,
                        btrfs_super_num_devices(info->super_copy));

        /*
         * if we don't copy this out to the super_copy, it won't get remembered
         * for the next commit
         */
        memcpy(&info->super_copy->super_roots,
               &info->super_for_commit->super_roots,
               sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS);
}
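
/*
 * The backups form a ring of BTRFS_NUM_BACKUP_ROOTS slots in the super
 * block: each commit overwrites the slot at backup_root_index and then
 * advances the index modulo the ring size, so the oldest entry is always
 * the next one to be recycled.
 */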
/*
 * this copies info out of the root backup array and back into
 * the in-memory super block.  It is meant to help iterate through
 * the array, so you send it the number of backups you've already
 * tried and the last backup index you used.
 *
 * this returns -1 when it has tried all the backups
 */
static noinline int next_root_backup(struct btrfs_fs_info *info,
                                     struct btrfs_super_block *super,
                                     int *num_backups_tried, int *backup_index)
{
        struct btrfs_root_backup *root_backup;
        int newest = *backup_index;

        if (*num_backups_tried == 0) {
                u64 gen = btrfs_super_generation(super);

                newest = find_newest_super_backup(info, gen);
                if (newest == -1)
                        return -1;

                *backup_index = newest;
                *num_backups_tried = 1;
        } else if (*num_backups_tried == BTRFS_NUM_BACKUP_ROOTS) {
                /* we've tried all the backups, all done */
                return -1;
        } else {
                /* jump to the next oldest backup */
                newest = (*backup_index + BTRFS_NUM_BACKUP_ROOTS - 1) %
                        BTRFS_NUM_BACKUP_ROOTS;
                *backup_index = newest;
                *num_backups_tried += 1;
        }
        root_backup = super->super_roots + newest;

        btrfs_set_super_generation(super,
                                   btrfs_backup_tree_root_gen(root_backup));
        btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup));
        btrfs_set_super_root_level(super,
                                   btrfs_backup_tree_root_level(root_backup));
        btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup));

        /*
         * fixme: the total bytes and num_devices need to match or we should
         * need a fsck
         */
        btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup));
        btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup));
        return 0;
}
/* helper to cleanup tree roots */
static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
{
        free_extent_buffer(info->tree_root->node);
        free_extent_buffer(info->tree_root->commit_root);
        free_extent_buffer(info->dev_root->node);
        free_extent_buffer(info->dev_root->commit_root);
        free_extent_buffer(info->extent_root->node);
        free_extent_buffer(info->extent_root->commit_root);
        free_extent_buffer(info->csum_root->node);
        free_extent_buffer(info->csum_root->commit_root);

        info->tree_root->node = NULL;
        info->tree_root->commit_root = NULL;
        info->dev_root->node = NULL;
        info->dev_root->commit_root = NULL;
        info->extent_root->node = NULL;
        info->extent_root->commit_root = NULL;
        info->csum_root->node = NULL;
        info->csum_root->commit_root = NULL;

        if (chunk_root) {
                free_extent_buffer(info->chunk_root->node);
                free_extent_buffer(info->chunk_root->commit_root);
                info->chunk_root->node = NULL;
                info->chunk_root->commit_root = NULL;
        }
}
1845 int open_ctree(struct super_block
*sb
,
1846 struct btrfs_fs_devices
*fs_devices
,
1856 struct btrfs_key location
;
1857 struct buffer_head
*bh
;
1858 struct btrfs_super_block
*disk_super
;
1859 struct btrfs_fs_info
*fs_info
= btrfs_sb(sb
);
1860 struct btrfs_root
*tree_root
;
1861 struct btrfs_root
*extent_root
;
1862 struct btrfs_root
*csum_root
;
1863 struct btrfs_root
*chunk_root
;
1864 struct btrfs_root
*dev_root
;
1865 struct btrfs_root
*log_tree_root
;
1868 int num_backups_tried
= 0;
1869 int backup_index
= 0;
1871 tree_root
= fs_info
->tree_root
= btrfs_alloc_root(fs_info
);
1872 extent_root
= fs_info
->extent_root
= btrfs_alloc_root(fs_info
);
1873 csum_root
= fs_info
->csum_root
= btrfs_alloc_root(fs_info
);
1874 chunk_root
= fs_info
->chunk_root
= btrfs_alloc_root(fs_info
);
1875 dev_root
= fs_info
->dev_root
= btrfs_alloc_root(fs_info
);
1877 if (!tree_root
|| !extent_root
|| !csum_root
||
1878 !chunk_root
|| !dev_root
) {
1883 ret
= init_srcu_struct(&fs_info
->subvol_srcu
);
1889 ret
= setup_bdi(fs_info
, &fs_info
->bdi
);
1895 fs_info
->btree_inode
= new_inode(sb
);
1896 if (!fs_info
->btree_inode
) {
1901 mapping_set_gfp_mask(fs_info
->btree_inode
->i_mapping
, GFP_NOFS
);
1903 INIT_RADIX_TREE(&fs_info
->fs_roots_radix
, GFP_ATOMIC
);
1904 INIT_LIST_HEAD(&fs_info
->trans_list
);
1905 INIT_LIST_HEAD(&fs_info
->dead_roots
);
1906 INIT_LIST_HEAD(&fs_info
->delayed_iputs
);
1907 INIT_LIST_HEAD(&fs_info
->hashers
);
1908 INIT_LIST_HEAD(&fs_info
->delalloc_inodes
);
1909 INIT_LIST_HEAD(&fs_info
->ordered_operations
);
1910 INIT_LIST_HEAD(&fs_info
->caching_block_groups
);
1911 spin_lock_init(&fs_info
->delalloc_lock
);
1912 spin_lock_init(&fs_info
->trans_lock
);
1913 spin_lock_init(&fs_info
->ref_cache_lock
);
1914 spin_lock_init(&fs_info
->fs_roots_radix_lock
);
1915 spin_lock_init(&fs_info
->delayed_iput_lock
);
1916 spin_lock_init(&fs_info
->defrag_inodes_lock
);
1917 spin_lock_init(&fs_info
->free_chunk_lock
);
1918 spin_lock_init(&fs_info
->tree_mod_seq_lock
);
1919 rwlock_init(&fs_info
->tree_mod_log_lock
);
1920 mutex_init(&fs_info
->reloc_mutex
);
1922 init_completion(&fs_info
->kobj_unregister
);
1923 INIT_LIST_HEAD(&fs_info
->dirty_cowonly_roots
);
1924 INIT_LIST_HEAD(&fs_info
->space_info
);
1925 INIT_LIST_HEAD(&fs_info
->tree_mod_seq_list
);
1926 btrfs_mapping_init(&fs_info
->mapping_tree
);
1927 btrfs_init_block_rsv(&fs_info
->global_block_rsv
);
1928 btrfs_init_block_rsv(&fs_info
->delalloc_block_rsv
);
1929 btrfs_init_block_rsv(&fs_info
->trans_block_rsv
);
1930 btrfs_init_block_rsv(&fs_info
->chunk_block_rsv
);
1931 btrfs_init_block_rsv(&fs_info
->empty_block_rsv
);
1932 btrfs_init_block_rsv(&fs_info
->delayed_block_rsv
);
1933 atomic_set(&fs_info
->nr_async_submits
, 0);
1934 atomic_set(&fs_info
->async_delalloc_pages
, 0);
1935 atomic_set(&fs_info
->async_submit_draining
, 0);
1936 atomic_set(&fs_info
->nr_async_bios
, 0);
1937 atomic_set(&fs_info
->defrag_running
, 0);
1938 atomic_set(&fs_info
->tree_mod_seq
, 0);
1940 fs_info
->max_inline
= 8192 * 1024;
1941 fs_info
->metadata_ratio
= 0;
1942 fs_info
->defrag_inodes
= RB_ROOT
;
1943 fs_info
->trans_no_join
= 0;
1944 fs_info
->free_chunk_space
= 0;
1945 fs_info
->tree_mod_log
= RB_ROOT
;
1947 /* readahead state */
1948 INIT_RADIX_TREE(&fs_info
->reada_tree
, GFP_NOFS
& ~__GFP_WAIT
);
1949 spin_lock_init(&fs_info
->reada_lock
);
1951 fs_info
->thread_pool_size
= min_t(unsigned long,
1952 num_online_cpus() + 2, 8);
1954 INIT_LIST_HEAD(&fs_info
->ordered_extents
);
1955 spin_lock_init(&fs_info
->ordered_extent_lock
);
1956 fs_info
->delayed_root
= kmalloc(sizeof(struct btrfs_delayed_root
),
1958 if (!fs_info
->delayed_root
) {
1962 btrfs_init_delayed_root(fs_info
->delayed_root
);
1964 mutex_init(&fs_info
->scrub_lock
);
1965 atomic_set(&fs_info
->scrubs_running
, 0);
1966 atomic_set(&fs_info
->scrub_pause_req
, 0);
1967 atomic_set(&fs_info
->scrubs_paused
, 0);
1968 atomic_set(&fs_info
->scrub_cancel_req
, 0);
1969 init_waitqueue_head(&fs_info
->scrub_pause_wait
);
1970 init_rwsem(&fs_info
->scrub_super_lock
);
1971 fs_info
->scrub_workers_refcnt
= 0;
1972 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
1973 fs_info
->check_integrity_print_mask
= 0;
1976 spin_lock_init(&fs_info
->balance_lock
);
1977 mutex_init(&fs_info
->balance_mutex
);
1978 atomic_set(&fs_info
->balance_running
, 0);
1979 atomic_set(&fs_info
->balance_pause_req
, 0);
1980 atomic_set(&fs_info
->balance_cancel_req
, 0);
1981 fs_info
->balance_ctl
= NULL
;
1982 init_waitqueue_head(&fs_info
->balance_wait_q
);
1984 sb
->s_blocksize
= 4096;
1985 sb
->s_blocksize_bits
= blksize_bits(4096);
1986 sb
->s_bdi
= &fs_info
->bdi
;
1988 fs_info
->btree_inode
->i_ino
= BTRFS_BTREE_INODE_OBJECTID
;
1989 set_nlink(fs_info
->btree_inode
, 1);
1991 * we set the i_size on the btree inode to the max possible int.
1992 * the real end of the address space is determined by all of
1993 * the devices in the system
1995 fs_info
->btree_inode
->i_size
= OFFSET_MAX
;
1996 fs_info
->btree_inode
->i_mapping
->a_ops
= &btree_aops
;
1997 fs_info
->btree_inode
->i_mapping
->backing_dev_info
= &fs_info
->bdi
;
1999 RB_CLEAR_NODE(&BTRFS_I(fs_info
->btree_inode
)->rb_node
);
2000 extent_io_tree_init(&BTRFS_I(fs_info
->btree_inode
)->io_tree
,
2001 fs_info
->btree_inode
->i_mapping
);
2002 BTRFS_I(fs_info
->btree_inode
)->io_tree
.track_uptodate
= 0;
2003 extent_map_tree_init(&BTRFS_I(fs_info
->btree_inode
)->extent_tree
);
2005 BTRFS_I(fs_info
->btree_inode
)->io_tree
.ops
= &btree_extent_io_ops
;
2007 BTRFS_I(fs_info
->btree_inode
)->root
= tree_root
;
2008 memset(&BTRFS_I(fs_info
->btree_inode
)->location
, 0,
2009 sizeof(struct btrfs_key
));
2010 set_bit(BTRFS_INODE_DUMMY
,
2011 &BTRFS_I(fs_info
->btree_inode
)->runtime_flags
);
2012 insert_inode_hash(fs_info
->btree_inode
);
2014 spin_lock_init(&fs_info
->block_group_cache_lock
);
2015 fs_info
->block_group_cache_tree
= RB_ROOT
;
2017 extent_io_tree_init(&fs_info
->freed_extents
[0],
2018 fs_info
->btree_inode
->i_mapping
);
2019 extent_io_tree_init(&fs_info
->freed_extents
[1],
2020 fs_info
->btree_inode
->i_mapping
);
2021 fs_info
->pinned_extents
= &fs_info
->freed_extents
[0];
2022 fs_info
->do_barriers
= 1;
2025 mutex_init(&fs_info
->ordered_operations_mutex
);
2026 mutex_init(&fs_info
->tree_log_mutex
);
2027 mutex_init(&fs_info
->chunk_mutex
);
2028 mutex_init(&fs_info
->transaction_kthread_mutex
);
2029 mutex_init(&fs_info
->cleaner_mutex
);
2030 mutex_init(&fs_info
->volume_mutex
);
2031 init_rwsem(&fs_info
->extent_commit_sem
);
2032 init_rwsem(&fs_info
->cleanup_work_sem
);
2033 init_rwsem(&fs_info
->subvol_sem
);
2035 btrfs_init_free_cluster(&fs_info
->meta_alloc_cluster
);
2036 btrfs_init_free_cluster(&fs_info
->data_alloc_cluster
);
2038 init_waitqueue_head(&fs_info
->transaction_throttle
);
2039 init_waitqueue_head(&fs_info
->transaction_wait
);
2040 init_waitqueue_head(&fs_info
->transaction_blocked_wait
);
2041 init_waitqueue_head(&fs_info
->async_submit_wait
);
2043 __setup_root(4096, 4096, 4096, 4096, tree_root
,
2044 fs_info
, BTRFS_ROOT_TREE_OBJECTID
);
2046 invalidate_bdev(fs_devices
->latest_bdev
);
2047 bh
= btrfs_read_dev_super(fs_devices
->latest_bdev
);
2053 memcpy(fs_info
->super_copy
, bh
->b_data
, sizeof(*fs_info
->super_copy
));
2054 memcpy(fs_info
->super_for_commit
, fs_info
->super_copy
,
2055 sizeof(*fs_info
->super_for_commit
));
2058 memcpy(fs_info
->fsid
, fs_info
->super_copy
->fsid
, BTRFS_FSID_SIZE
);
2060 disk_super
= fs_info
->super_copy
;
2061 if (!btrfs_super_root(disk_super
))
2064 /* check FS state, whether FS is broken. */
2065 fs_info
->fs_state
|= btrfs_super_flags(disk_super
);
2067 ret
= btrfs_check_super_valid(fs_info
, sb
->s_flags
& MS_RDONLY
);
2069 printk(KERN_ERR
"btrfs: superblock contains fatal errors\n");
2075 * run through our array of backup supers and setup
2076 * our ring pointer to the oldest one
2078 generation
= btrfs_super_generation(disk_super
);
2079 find_oldest_super_backup(fs_info
, generation
);
2082 * In the long term, we'll store the compression type in the super
2083 * block, and it'll be used for per file compression control.
2085 fs_info
->compress_type
= BTRFS_COMPRESS_ZLIB
;
2087 ret
= btrfs_parse_options(tree_root
, options
);
	features = btrfs_super_incompat_flags(disk_super) &
		~BTRFS_FEATURE_INCOMPAT_SUPP;
	if (features) {
		printk(KERN_ERR "BTRFS: couldn't mount because of "
		       "unsupported optional features (%Lx).\n",
		       (unsigned long long)features);
		err = -EINVAL;
		goto fail_alloc;
	}

	if (btrfs_super_leafsize(disk_super) !=
	    btrfs_super_nodesize(disk_super)) {
		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
		       "blocksizes don't match.  node %d leaf %d\n",
		       btrfs_super_nodesize(disk_super),
		       btrfs_super_leafsize(disk_super));
		err = -EINVAL;
		goto fail_alloc;
	}
	if (btrfs_super_leafsize(disk_super) > BTRFS_MAX_METADATA_BLOCKSIZE) {
		printk(KERN_ERR "BTRFS: couldn't mount because metadata "
		       "blocksize (%d) was too large\n",
		       btrfs_super_leafsize(disk_super));
		err = -EINVAL;
		goto fail_alloc;
	}
	features = btrfs_super_incompat_flags(disk_super);
	features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
	if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
		features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;

	/*
	 * flag our filesystem as having big metadata blocks if
	 * they are bigger than the page size
	 */
	if (btrfs_super_leafsize(disk_super) > PAGE_CACHE_SIZE) {
		if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
			printk(KERN_INFO "btrfs flagging fs with big metadata feature\n");
		features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
	}
	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);

	/*
	 * mixed block groups end up with duplicate but slightly offset
	 * extent buffers for the same range.  It leads to corruptions
	 */
	if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
	    (sectorsize != leafsize)) {
		printk(KERN_WARNING "btrfs: unequal leaf/node/sector sizes "
		       "are not allowed for mixed block groups on %s\n",
		       sb->s_id);
		goto fail_alloc;
	}

	btrfs_set_super_incompat_flags(disk_super, features);

	features = btrfs_super_compat_ro_flags(disk_super) &
		~BTRFS_FEATURE_COMPAT_RO_SUPP;
	if (!(sb->s_flags & MS_RDONLY) && features) {
		printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
		       "unsupported option features (%Lx).\n",
		       (unsigned long long)features);
		err = -EINVAL;
		goto fail_alloc;
	}
	btrfs_init_workers(&fs_info->generic_worker,
			   "genwork", 1, NULL);

	btrfs_init_workers(&fs_info->workers, "worker",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->submit_workers, "submit",
			   min_t(u64, fs_devices->num_devices,
			   fs_info->thread_pool_size),
			   &fs_info->generic_worker);

	btrfs_init_workers(&fs_info->caching_workers, "cache",
			   2, &fs_info->generic_worker);
	/* a higher idle thresh on the submit workers makes it much more
	 * likely that bios will be sent down in a sane order to the
	 * devices
	 */
	fs_info->submit_workers.idle_thresh = 64;

	fs_info->workers.idle_thresh = 16;
	fs_info->workers.ordered = 1;

	fs_info->delalloc_workers.idle_thresh = 2;
	fs_info->delalloc_workers.ordered = 1;
	btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_workers, "endio",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_meta_write_workers,
			   "endio-meta-write", fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
			   1, &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	btrfs_init_workers(&fs_info->readahead_workers, "readahead",
			   fs_info->thread_pool_size,
			   &fs_info->generic_worker);
	/*
	 * endios are largely parallel and should have a very
	 * low idle thresh
	 */
	fs_info->endio_workers.idle_thresh = 4;
	fs_info->endio_meta_workers.idle_thresh = 4;

	fs_info->endio_write_workers.idle_thresh = 2;
	fs_info->endio_meta_write_workers.idle_thresh = 2;
	fs_info->readahead_workers.idle_thresh = 2;
	/*
	 * btrfs_start_workers can really only fail because of ENOMEM so just
	 * return -ENOMEM if any of these fail.
	 */
	ret = btrfs_start_workers(&fs_info->workers);
	ret |= btrfs_start_workers(&fs_info->generic_worker);
	ret |= btrfs_start_workers(&fs_info->submit_workers);
	ret |= btrfs_start_workers(&fs_info->delalloc_workers);
	ret |= btrfs_start_workers(&fs_info->fixup_workers);
	ret |= btrfs_start_workers(&fs_info->endio_workers);
	ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
	ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
	ret |= btrfs_start_workers(&fs_info->endio_write_workers);
	ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
	ret |= btrfs_start_workers(&fs_info->delayed_workers);
	ret |= btrfs_start_workers(&fs_info->caching_workers);
	ret |= btrfs_start_workers(&fs_info->readahead_workers);
	if (ret) {
		err = -ENOMEM;
		goto fail_sb_buffer;
	}
	fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
	fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
				    4 * 1024 * 1024 / PAGE_CACHE_SIZE);

	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;

	sb->s_blocksize = sectorsize;
	sb->s_blocksize_bits = blksize_bits(sectorsize);
	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	if (sectorsize != PAGE_SIZE) {
		printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) "
		       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
		goto fail_sb_buffer;
	}

	mutex_lock(&fs_info->chunk_mutex);
	ret = btrfs_read_sys_array(tree_root);
	mutex_unlock(&fs_info->chunk_mutex);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read the system "
		       "array on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}
	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));
	generation = btrfs_super_chunk_root_generation(disk_super);

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize, generation);
	BUG_ON(!chunk_root->node); /* -ENOMEM */
	if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
		printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}
	btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
	chunk_root->commit_root = btrfs_root_node(chunk_root);
	read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
	   (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
	   BTRFS_UUID_SIZE);

	ret = btrfs_read_chunk_tree(chunk_root);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}

	btrfs_close_extra_devices(fs_devices);

	if (!fs_devices->latest_bdev) {
		printk(KERN_CRIT "btrfs: failed to read devices on %s\n",
		       sb->s_id);
		goto fail_tree_roots;
	}
retry_root_backup:
	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));
	generation = btrfs_super_generation(disk_super);

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize, generation);
	if (!tree_root->node ||
	    !test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
		printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
		       sb->s_id);
		goto recovery_tree_root;
	}

	btrfs_set_root_node(&tree_root->root_item, tree_root->node);
	tree_root->commit_root = btrfs_root_node(tree_root);
	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	if (ret)
		goto recovery_tree_root;
	extent_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	if (ret)
		goto recovery_tree_root;
	dev_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_CSUM_TREE_OBJECTID, csum_root);
	if (ret)
		goto recovery_tree_root;
	csum_root->track_dirty = 1;
	fs_info->generation = generation;
	fs_info->last_trans_committed = generation;

	ret = btrfs_recover_balance(fs_info);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to recover balance\n");
		goto fail_block_groups;
	}

	ret = btrfs_init_dev_stats(fs_info);
	if (ret) {
		printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
		       ret);
		goto fail_block_groups;
	}

	ret = btrfs_init_space_info(fs_info);
	if (ret) {
		printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
		goto fail_block_groups;
	}

	ret = btrfs_read_block_groups(extent_root);
	if (ret) {
		printk(KERN_ERR "Failed to read block groups: %d\n", ret);
		goto fail_block_groups;
	}
	fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
					       "btrfs-cleaner");
	if (IS_ERR(fs_info->cleaner_kthread))
		goto fail_block_groups;

	fs_info->transaction_kthread = kthread_run(transaction_kthread,
						   tree_root,
						   "btrfs-transaction");
	if (IS_ERR(fs_info->transaction_kthread))
		goto fail_cleaner;
	if (!btrfs_test_opt(tree_root, SSD) &&
	    !btrfs_test_opt(tree_root, NOSSD) &&
	    !fs_info->fs_devices->rotating) {
		printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
		       "mode\n");
		btrfs_set_opt(fs_info->mount_opt, SSD);
	}

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
		ret = btrfsic_mount(tree_root, fs_devices,
				    btrfs_test_opt(tree_root,
					CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
				    1 : 0,
				    fs_info->check_integrity_print_mask);
		if (ret)
			printk(KERN_WARNING "btrfs: failed to initialize"
			       " integrity check module %s\n", sb->s_id);
	}
#endif
	/* do not make disk changes in broken FS */
	if (btrfs_super_log_root(disk_super) != 0 &&
	    !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
		u64 bytenr = btrfs_super_log_root(disk_super);

		if (fs_devices->rw_devices == 0) {
			printk(KERN_WARNING "Btrfs log replay required "
			       "on RO media\n");
			err = -EIO;
			goto fail_trans_kthread;
		}
		blocksize =
		     btrfs_level_size(tree_root,
				      btrfs_super_log_root_level(disk_super));

		log_tree_root = btrfs_alloc_root(fs_info);
		if (!log_tree_root) {
			err = -ENOMEM;
			goto fail_trans_kthread;
		}

		__setup_root(nodesize, leafsize, sectorsize, stripesize,
			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

		log_tree_root->node = read_tree_block(tree_root, bytenr,
						      blocksize,
						      generation + 1);
		/* returns with log_tree_root freed on success */
		ret = btrfs_recover_log_trees(log_tree_root);
		if (ret) {
			btrfs_error(tree_root->fs_info, ret,
				    "Failed to recover log tree");
			free_extent_buffer(log_tree_root->node);
			kfree(log_tree_root);
			goto fail_trans_kthread;
		}
		if (sb->s_flags & MS_RDONLY) {
			ret = btrfs_commit_super(tree_root);
			if (ret)
				goto fail_trans_kthread;
		}
	}

	ret = btrfs_find_orphan_roots(tree_root);
	if (ret)
		goto fail_trans_kthread;

	if (!(sb->s_flags & MS_RDONLY)) {
		ret = btrfs_cleanup_fs_roots(fs_info);
		if (ret)
			goto fail_trans_kthread;

		ret = btrfs_recover_relocation(tree_root);
		if (ret < 0) {
			printk(KERN_WARNING
			       "btrfs: failed to recover relocation\n");
			err = -EINVAL;
			goto fail_trans_kthread;
		}
	}
	location.objectid = BTRFS_FS_TREE_OBJECTID;
	location.type = BTRFS_ROOT_ITEM_KEY;
	location.offset = (u64)-1;

	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
	if (!fs_info->fs_root)
		goto fail_trans_kthread;
	if (IS_ERR(fs_info->fs_root)) {
		err = PTR_ERR(fs_info->fs_root);
		goto fail_trans_kthread;
	}
	if (sb->s_flags & MS_RDONLY)
		return 0;

	down_read(&fs_info->cleanup_work_sem);
	if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) ||
	    (ret = btrfs_orphan_cleanup(fs_info->tree_root))) {
		up_read(&fs_info->cleanup_work_sem);
		close_ctree(tree_root);
		return ret;
	}
	up_read(&fs_info->cleanup_work_sem);

	ret = btrfs_resume_balance_async(fs_info);
	if (ret) {
		printk(KERN_WARNING "btrfs: failed to resume balance\n");
		close_ctree(tree_root);
		return ret;
	}

	return 0;
fail_trans_kthread:
	kthread_stop(fs_info->transaction_kthread);
fail_cleaner:
	kthread_stop(fs_info->cleaner_kthread);

	/*
	 * make sure we're done with the btree inode before we stop our
	 * kthreads
	 */
	filemap_write_and_wait(fs_info->btree_inode->i_mapping);
	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_block_groups:
	btrfs_free_block_groups(fs_info);

fail_tree_roots:
	free_root_pointers(fs_info, 1);

fail_sb_buffer:
	btrfs_stop_workers(&fs_info->generic_worker);
	btrfs_stop_workers(&fs_info->readahead_workers);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->delalloc_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_meta_workers);
	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->endio_freespace_worker);
	btrfs_stop_workers(&fs_info->submit_workers);
	btrfs_stop_workers(&fs_info->delayed_workers);
	btrfs_stop_workers(&fs_info->caching_workers);
fail_alloc:
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
	iput(fs_info->btree_inode);

	bdi_destroy(&fs_info->bdi);

	cleanup_srcu_struct(&fs_info->subvol_srcu);

	btrfs_close_devices(fs_info->fs_devices);
	return err;
recovery_tree_root:
	if (!btrfs_test_opt(tree_root, RECOVERY))
		goto fail_tree_roots;

	free_root_pointers(fs_info, 0);

	/* don't use the log in recovery mode, it won't be valid */
	btrfs_set_super_log_root(disk_super, 0);

	/* we can't trust the free space cache either */
	btrfs_set_opt(fs_info->mount_opt, CLEAR_CACHE);

	ret = next_root_backup(fs_info, fs_info->super_copy,
			       &num_backups_tried, &backup_index);
	if (ret == -1)
		goto fail_block_groups;
	goto retry_root_backup;
}
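
/*
 * Editor's illustrative sketch, not part of the original file: the
 * recovery path above retries the mount from progressively older backup
 * roots.  The same idea in standalone user-space C; try_root() and the
 * ring contents are hypothetical stand-ins for read_tree_block() and the
 * backup super ring, and the block is guarded by "#if 0" so it is never
 * compiled into the kernel.
 */
#if 0
#include <stdio.h>

struct backup_root { unsigned long long generation; };

static int try_root(const struct backup_root *b)
{
	return b->generation == 40 ? 0 : -1;	/* pretend only gen 40 reads back */
}

int main(void)
{
	struct backup_root ring[4] = { {43}, {42}, {41}, {40} };
	int i;

	for (i = 0; i < 4; i++) {	/* newest backup first, oldest last */
		if (!try_root(&ring[i])) {
			printf("recovered from backup generation %llu\n",
			       ring[i].generation);
			return 0;
		}
	}
	fprintf(stderr, "no usable backup root\n");
	return 1;
}
#endif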
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
	if (uptodate) {
		set_buffer_uptodate(bh);
	} else {
		struct btrfs_device *device = (struct btrfs_device *)
			bh->b_private;

		printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
					  "I/O error on %s\n",
					  rcu_str_deref(device->name));
		/* note, we don't set_buffer_write_io_error because we have
		 * our own ways of dealing with the IO errors
		 */
		clear_buffer_uptodate(bh);
		btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
	}

	unlock_buffer(bh);
	put_bh(bh);
}
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
	struct buffer_head *bh;
	struct buffer_head *latest = NULL;
	struct btrfs_super_block *super;
	int i;
	u64 transid = 0;
	u64 bytenr;

	/* we would like to check all the supers, but that would make
	 * a btrfs mount succeed after a mkfs from a different FS.
	 * So, we need to add a special mount option to scan for
	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
	 */
	for (i = 0; i < 1; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
			break;
		bh = __bread(bdev, bytenr / 4096, 4096);
		if (!bh)
			continue;

		super = (struct btrfs_super_block *)bh->b_data;
		if (btrfs_super_bytenr(super) != bytenr ||
		    strncmp((char *)(&super->magic), BTRFS_MAGIC,
			    sizeof(super->magic))) {
			brelse(bh);
			continue;
		}

		if (!latest || btrfs_super_generation(super) > transid) {
			brelse(latest);
			latest = bh;
			transid = btrfs_super_generation(super);
		} else {
			brelse(bh);
		}
	}
	return latest;
}
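
/*
 * Editor's illustrative sketch, not part of the original file: the scan
 * above keeps whichever valid super copy has the highest generation.
 * A standalone user-space version of that selection loop; the mirror
 * array and its fields are hypothetical.  Guarded by "#if 0" so it is
 * never compiled here.
 */
#if 0
#include <stdio.h>

struct super_mirror { int valid; unsigned long long generation; };

int main(void)
{
	struct super_mirror m[3] = { {1, 10}, {1, 12}, {0, 0} };
	struct super_mirror *latest = NULL;
	unsigned long long transid = 0;
	int i;

	for (i = 0; i < 3; i++) {
		if (!m[i].valid)
			continue;		/* wrong magic/bytenr: skip */
		if (!latest || m[i].generation > transid) {
			latest = &m[i];		/* keep the newest copy */
			transid = m[i].generation;
		}
	}
	if (latest)
		printf("using super with generation %llu\n", transid);
	return 0;
}
#endif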
/*
 * this should be called twice, once with wait == 0 and
 * once with wait == 1.  When wait == 0 is done, all the buffer heads
 * we write are pinned.
 *
 * They are released when wait == 1 is done.
 * max_mirrors must be the same for both runs, and it indicates how
 * many supers on this one device should be written.
 *
 * max_mirrors == 0 means to write them all.
 */
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb,
			    int do_barriers, int wait, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
			break;

		if (wait) {
			bh = __find_get_block(device->bdev, bytenr / 4096,
					      BTRFS_SUPER_INFO_SIZE);
			BUG_ON(!bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				errors++;

			/* drop our reference */
			brelse(bh);

			/* drop the reference from the wait == 0 run */
			brelse(bh);
			continue;
		} else {
			btrfs_set_super_bytenr(sb, bytenr);

			crc = ~(u32)0;
			crc = btrfs_csum_data(NULL, (char *)sb +
					      BTRFS_CSUM_SIZE, crc,
					      BTRFS_SUPER_INFO_SIZE -
					      BTRFS_CSUM_SIZE);
			btrfs_csum_final(crc, sb->csum);

			/*
			 * one reference for us, and we leave it for the
			 * caller
			 */
			bh = __getblk(device->bdev, bytenr / 4096,
				      BTRFS_SUPER_INFO_SIZE);
			memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

			/* one reference for submit_bh */
			get_bh(bh);

			set_buffer_uptodate(bh);
			lock_buffer(bh);
			bh->b_end_io = btrfs_end_buffer_write_sync;
			bh->b_private = device;
		}

		/*
		 * we fua the first super.  The others we allow
		 * to go down lazy.
		 */
		ret = btrfsic_submit_bh(WRITE_FUA, bh);
		if (ret)
			errors++;
	}
	return errors < i ? 0 : -1;
}
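
/*
 * Editor's illustrative sketch, not part of the original file: the
 * pin-on-submit / release-on-wait reference counting that the two
 * write_dev_supers() passes rely on, reduced to standalone user-space C.
 * struct fake_bh and submit_write() are hypothetical.  Guarded by
 * "#if 0" so it is never compiled here.
 */
#if 0
#include <stdio.h>

struct fake_bh { int refs; int io_done; };

static void submit_write(struct fake_bh *bh)
{
	bh->refs++;		/* the extra reference pins the buffer */
	bh->io_done = 1;	/* pretend the write completed instantly */
}

int main(void)
{
	struct fake_bh bh = { .refs = 1 };	/* one reference for us */

	submit_write(&bh);	/* pass 1 (wait == 0): submit and pin */

	while (!bh.io_done)	/* pass 2 (wait == 1): wait ... */
		;
	bh.refs--;		/* ... drop our reference ... */
	bh.refs--;		/* ... and the one from the wait == 0 run */

	printf("remaining refs: %d\n", bh.refs);
	return 0;
}
#endif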
/*
 * endio for the write_dev_flush, this will wake anyone waiting
 * for the barrier when it is done
 */
static void btrfs_end_empty_barrier(struct bio *bio, int err)
{
	if (err) {
		if (err == -EOPNOTSUPP)
			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	}
	if (bio->bi_private)
		complete(bio->bi_private);
	bio_put(bio);
}
/*
 * trigger flushes for one of the devices.  If you pass wait == 0, the flushes
 * are sent down.  With wait == 1, it waits for the previous flush.
 *
 * any device where the flush fails with eopnotsupp are flagged as not-barrier
 * capable
 */
static int write_dev_flush(struct btrfs_device *device, int wait)
{
	struct bio *bio;
	int ret = 0;

	if (device->nobarriers)
		return 0;

	if (wait) {
		bio = device->flush_bio;
		if (!bio)
			return 0;

		wait_for_completion(&device->flush_wait);

		if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
			printk_in_rcu("btrfs: disabling barriers on dev %s\n",
				      rcu_str_deref(device->name));
			device->nobarriers = 1;
		}
		if (!bio_flagged(bio, BIO_UPTODATE)) {
			ret = -EIO;
			if (!bio_flagged(bio, BIO_EOPNOTSUPP))
				btrfs_dev_stat_inc_and_print(device,
					BTRFS_DEV_STAT_FLUSH_ERRS);
		}

		/* drop the reference from the wait == 0 run */
		bio_put(bio);
		device->flush_bio = NULL;

		return ret;
	}

	/*
	 * one reference for us, and we leave it for the
	 * caller
	 */
	device->flush_bio = NULL;
	bio = bio_alloc(GFP_NOFS, 0);
	if (!bio)
		return -ENOMEM;

	bio->bi_end_io = btrfs_end_empty_barrier;
	bio->bi_bdev = device->bdev;
	init_completion(&device->flush_wait);
	bio->bi_private = &device->flush_wait;
	device->flush_bio = bio;

	bio_get(bio);
	btrfsic_submit_bio(WRITE_FLUSH, bio);

	return 0;
}
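
/*
 * Editor's illustrative sketch, not part of the original file: the
 * EOPNOTSUPP latch above (once a device reports that it cannot flush,
 * never ask it again) in standalone user-space C.  struct fake_dev,
 * hw_flush() and dev_flush() are hypothetical.  Guarded by "#if 0" so
 * it is never compiled here.
 */
#if 0
#include <errno.h>
#include <stdio.h>

struct fake_dev { int nobarriers; };

static int hw_flush(void)
{
	return -EOPNOTSUPP;	/* pretend the hardware has no flush/FUA */
}

static int dev_flush(struct fake_dev *d)
{
	int err;

	if (d->nobarriers)
		return 0;	/* already known unsupported: quiet no-op */

	err = hw_flush();
	if (err == -EOPNOTSUPP) {
		fprintf(stderr, "disabling barriers on this device\n");
		d->nobarriers = 1;	/* latch it, don't retry forever */
		return 0;
	}
	return err;
}

int main(void)
{
	struct fake_dev d = { 0 };

	dev_flush(&d);		/* first flush trips the fallback */
	return dev_flush(&d);	/* later flushes are silent no-ops */
}
#endif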
/*
 * send an empty flush down to each device in parallel,
 * then wait for them
 */
static int barrier_all_devices(struct btrfs_fs_info *info)
{
	struct list_head *head;
	struct btrfs_device *dev;
	int errors = 0;
	int ret;

	/* send down all the barriers */
	head = &info->fs_devices->devices;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 0);
		if (ret)
			errors++;
	}

	/* wait for all the barriers */
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_flush(dev, 1);
		if (ret)
			errors++;
	}
	if (errors)
		return -EIO;
	return 0;
}
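
/*
 * Editor's illustrative sketch, not part of the original file: the
 * submit-everything-then-wait-for-everything shape of
 * barrier_all_devices(), as standalone user-space C.  The device array
 * and flush helpers are hypothetical.  Guarded by "#if 0" so it is
 * never compiled here.
 */
#if 0
#include <stdio.h>

struct fake_dev { int writeable; int flush_pending; };

static void flush_submit(struct fake_dev *d) { d->flush_pending = 1; }
static int flush_wait(struct fake_dev *d) { d->flush_pending = 0; return 0; }

int main(void)
{
	struct fake_dev devs[3] = { {1, 0}, {0, 0}, {1, 0} };
	int i, errors = 0;

	for (i = 0; i < 3; i++) {		/* pass 1: start every flush */
		if (!devs[i].writeable)
			continue;
		flush_submit(&devs[i]);
	}
	for (i = 0; i < 3; i++) {		/* pass 2: wait for each one */
		if (!devs[i].writeable)
			continue;
		if (flush_wait(&devs[i]))
			errors++;
	}
	return errors ? 1 : 0;
}
#endif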
int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	max_errors = btrfs_super_num_devices(root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);
	backup_super_roots(root->fs_info);

	sb = root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	head = &root->fs_info->fs_devices->devices;

	if (do_barriers)
		barrier_all_devices(root->fs_info);

	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
		       total_errors);

		/* This shouldn't happen. FUA is masked off if unsupported */
		BUG();
	}

	total_errors = 0;
	list_for_each_entry_rcu(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		btrfs_error(root->fs_info, -EIO,
			    "%d errors while writing supers", total_errors);
		return -EIO;
	}
	return 0;
}
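
/*
 * Editor's illustrative sketch, not part of the original file: a tiny
 * worked example of the error-tolerance rule above, where up to
 * num_devices - 1 failed super writes are tolerated.  Standalone
 * user-space C, guarded by "#if 0" so it is never compiled here.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int num_devices = 4;
	int max_errors = num_devices - 1;	/* same tolerance rule */
	int total_errors;

	for (total_errors = 0; total_errors <= num_devices; total_errors++)
		printf("%d write errors: %s\n", total_errors,
		       total_errors > max_errors ? "abort" : "tolerated");
	return 0;
}
#endif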
int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors)
{
	int ret;

	ret = write_all_supers(root, max_mirrors);
	return ret;
}
void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	__btrfs_remove_free_space_cache(root->free_ino_pinned);
	__btrfs_remove_free_space_cache(root->free_ino_ctl);
	free_fs_root(root);
}
static void free_fs_root(struct btrfs_root *root)
{
	iput(root->cache_inode);
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	if (root->anon_dev)
		free_anon_bdev(root->anon_dev);
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->free_ino_ctl);
	kfree(root->free_ino_pinned);
	kfree(root->name);
	kfree(root);
}
static void del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (gang[0]->in_radix) {
			btrfs_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			kfree(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;

		root_objectid = gang[ret - 1]->root_key.objectid + 1;
		for (i = 0; i < ret; i++) {
			int err;

			root_objectid = gang[i]->root_key.objectid;
			err = btrfs_orphan_cleanup(gang[i]);
			if (err)
				return err;
		}
		root_objectid++;
	}
	return 0;
}
int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	btrfs_clean_old_snapshots(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	/* wait until ongoing cleanup work done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_commit_transaction(trans, root);
	if (ret)
		return ret;
	/* run commit again to drop the original snapshot */
	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_commit_transaction(trans, root);
	if (ret)
		return ret;
	ret = btrfs_write_and_wait_transaction(NULL, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Failed to sync btree inode to disk.");
		return ret;
	}

	ret = write_ctree_super(NULL, root, 0);
	return ret;
}
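
/*
 * Editor's illustrative sketch, not part of the original file: the
 * "take the write side, then drop it" idiom used above to drain all
 * in-flight readers of cleanup_work_sem, reproduced with a POSIX
 * rwlock in standalone user-space C (build with -lpthread).  Guarded
 * by "#if 0" so it is never compiled here.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t cleanup_work_sem = PTHREAD_RWLOCK_INITIALIZER;

static void *cleanup_worker(void *arg)
{
	pthread_rwlock_rdlock(&cleanup_work_sem);	/* workers read-lock */
	/* ... cleanup work would run here ... */
	pthread_rwlock_unlock(&cleanup_work_sem);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, cleanup_worker, NULL);

	/* write-lock then unlock: returns only once all readers are gone */
	pthread_rwlock_wrlock(&cleanup_work_sem);
	pthread_rwlock_unlock(&cleanup_work_sem);
	puts("no cleanup work can still be running here");

	pthread_join(t, NULL);
	return 0;
}
#endif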
int close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	fs_info->closing = 1;
	smp_mb();

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(root->fs_info);

	btrfs_scrub_cancel(root);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_run_defrag_inodes(fs_info);

	/*
	 * Here come 2 situations when btrfs is broken to flip readonly:
	 *
	 * 1. when btrfs flips readonly somewhere else before
	 * btrfs_commit_super, sb->s_flags has MS_RDONLY flag,
	 * and btrfs will skip to write sb directly to keep
	 * ERROR state on disk.
	 *
	 * 2. when btrfs flips readonly just in btrfs_commit_super,
	 * and in such case, btrfs cannot write sb via btrfs_commit_super,
	 * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag,
	 * btrfs will cleanup all FS resources first and write sb then.
	 */
	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret = btrfs_commit_super(root);
		if (ret)
			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}

	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		ret = btrfs_error_commit_super(root);
		if (ret)
			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}

	btrfs_put_block_group_cache(fs_info);

	kthread_stop(fs_info->transaction_kthread);
	kthread_stop(fs_info->cleaner_kthread);

	fs_info->closing = 2;
	smp_mb();

	if (fs_info->delalloc_bytes) {
		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
		       (unsigned long long)fs_info->delalloc_bytes);
	}
	if (fs_info->total_ref_cache_size) {
		printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
		       (unsigned long long)fs_info->total_ref_cache_size);
	}

	free_extent_buffer(fs_info->extent_root->node);
	free_extent_buffer(fs_info->extent_root->commit_root);
	free_extent_buffer(fs_info->tree_root->node);
	free_extent_buffer(fs_info->tree_root->commit_root);
	free_extent_buffer(fs_info->chunk_root->node);
	free_extent_buffer(fs_info->chunk_root->commit_root);
	free_extent_buffer(fs_info->dev_root->node);
	free_extent_buffer(fs_info->dev_root->commit_root);
	free_extent_buffer(fs_info->csum_root->node);
	free_extent_buffer(fs_info->csum_root->commit_root);

	btrfs_free_block_groups(fs_info);

	del_fs_roots(fs_info);

	iput(fs_info->btree_inode);

	btrfs_stop_workers(&fs_info->generic_worker);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->delalloc_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_meta_workers);
	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->endio_freespace_worker);
	btrfs_stop_workers(&fs_info->submit_workers);
	btrfs_stop_workers(&fs_info->delayed_workers);
	btrfs_stop_workers(&fs_info->caching_workers);
	btrfs_stop_workers(&fs_info->readahead_workers);

#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
	if (btrfs_test_opt(root, CHECK_INTEGRITY))
		btrfsic_unmount(root, fs_info->fs_devices);
#endif

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	bdi_destroy(&fs_info->bdi);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	return 0;
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
			  int atomic)
{
	int ret;
	struct inode *btree_inode = buf->pages[0]->mapping->host;

	ret = extent_buffer_uptodate(buf);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid, atomic);
	if (ret == -EAGAIN)
		return ret;
	return !ret;
}
int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	return set_extent_buffer_uptodate(buf);
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	int was_dirty;

	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
		       "found %llu running %llu\n",
			(unsigned long long)buf->start,
			(unsigned long long)transid,
			(unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}
	was_dirty = set_extent_buffer_dirty(buf);
	if (!was_dirty) {
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->dirty_metadata_bytes += buf->len;
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	u64 num_dirty;
	unsigned long thresh = 32 * 1024 * 1024;

	if (current->flags & PF_MEMALLOC)
		return;

	btrfs_balance_delayed_items(root);

	num_dirty = root->fs_info->dirty_metadata_bytes;

	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				   root->fs_info->btree_inode->i_mapping, 1);
	}
	return;
}

void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code, they end up stuck in balance_dirty_pages forever
	 */
	u64 num_dirty;
	unsigned long thresh = 32 * 1024 * 1024;

	if (current->flags & PF_MEMALLOC)
		return;

	num_dirty = root->fs_info->dirty_metadata_bytes;

	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				   root->fs_info->btree_inode->i_mapping, 1);
	}
	return;
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->pages[0]->mapping->host)->root;
	return btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
}
static int btree_lock_page_hook(struct page *page, void *data,
				void (*flush_fn)(void *))
{
	struct inode *inode = page->mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *eb;

	/*
	 * We culled this eb but the page is still hanging out on the mapping,
	 * carry on.
	 */
	if (!PagePrivate(page))
		goto out;

	eb = (struct extent_buffer *)page->private;
	if (!eb)
		goto out;
	if (page != eb->pages[0])
		goto out;

	if (!btrfs_try_tree_write_lock(eb)) {
		flush_fn(data);
		btrfs_tree_lock(eb);
	}
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);

	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		spin_lock(&root->fs_info->delalloc_lock);
		if (root->fs_info->dirty_metadata_bytes >= eb->len)
			root->fs_info->dirty_metadata_bytes -= eb->len;
		else
			WARN_ON(1);
		spin_unlock(&root->fs_info->delalloc_lock);
	}

	btrfs_tree_unlock(eb);
out:
	if (!trylock_page(page)) {
		flush_fn(data);
		lock_page(page);
	}
	return 0;
}
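
/*
 * Editor's illustrative sketch, not part of the original file: the
 * try-lock-first, flush-then-block-on-failure pattern used twice in
 * the hook above, as standalone user-space C.  struct fake_lock and
 * its helpers are hypothetical.  Guarded by "#if 0" so it is never
 * compiled here.
 */
#if 0
#include <stdio.h>

struct fake_lock { int held; };

static int try_lock(struct fake_lock *l)
{
	if (l->held)
		return 0;		/* contended: caller must not spin */
	l->held = 1;
	return 1;
}

static void lock_blocking(struct fake_lock *l) { l->held = 1; }
static void flush_pending_io(void) { puts("flushing queued writes"); }

int main(void)
{
	struct fake_lock l = { 0 };

	if (!try_lock(&l)) {
		/* push out queued work so the lock holder can finish */
		flush_pending_io();
		lock_blocking(&l);
	}
	puts("lock acquired");
	return 0;
}
#endif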
static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
				   int read_only)
{
	if (btrfs_super_csum_type(fs_info->super_copy) >=
	    ARRAY_SIZE(btrfs_csum_sizes)) {
		printk(KERN_ERR "btrfs: unsupported checksum algorithm\n");
		return -EINVAL;
	}

	if (read_only)
		return 0;

	if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		printk(KERN_WARNING "warning: mount fs with errors, "
		       "running btrfsck is recommended\n");
	}

	return 0;
}
int btrfs_error_commit_super(struct btrfs_root *root)
{
	int ret;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	/* cleanup FS via transaction */
	btrfs_cleanup_transaction(root);

	ret = write_ctree_super(NULL, root, 0);

	return ret;
}
static void btrfs_destroy_ordered_operations(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->fs_info->ordered_operations_mutex);
	spin_lock(&root->fs_info->ordered_extent_lock);

	list_splice_init(&root->fs_info->ordered_operations, &splice);
	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 ordered_operations);

		list_del_init(&btrfs_inode->ordered_operations);

		btrfs_invalidate_inodes(btrfs_inode->root);
	}

	spin_unlock(&root->fs_info->ordered_extent_lock);
	mutex_unlock(&root->fs_info->ordered_operations_mutex);
}
static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
{
	struct list_head splice;
	struct btrfs_ordered_extent *ordered;
	struct inode *inode;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->ordered_extent_lock);

	list_splice_init(&root->fs_info->ordered_extents, &splice);
	while (!list_empty(&splice)) {
		ordered = list_entry(splice.next, struct btrfs_ordered_extent,
				     root_extent_list);

		list_del_init(&ordered->root_extent_list);
		atomic_inc(&ordered->refs);

		/* the inode may be getting freed (in sys_unlink path). */
		inode = igrab(ordered->inode);

		spin_unlock(&root->fs_info->ordered_extent_lock);
		if (inode)
			iput(inode);

		atomic_set(&ordered->refs, 1);
		btrfs_put_ordered_extent(ordered);

		spin_lock(&root->fs_info->ordered_extent_lock);
	}

	spin_unlock(&root->fs_info->ordered_extent_lock);
}
int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
			       struct btrfs_root *root)
{
	struct rb_node *node;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_delayed_ref_node *ref;
	int ret = 0;

	delayed_refs = &trans->delayed_refs;

	spin_lock(&delayed_refs->lock);
	if (delayed_refs->num_entries == 0) {
		spin_unlock(&delayed_refs->lock);
		printk(KERN_INFO "delayed_refs has NO entry\n");
		return ret;
	}

	while ((node = rb_first(&delayed_refs->root)) != NULL) {
		ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);

		atomic_set(&ref->refs, 1);
		if (btrfs_delayed_ref_is_head(ref)) {
			struct btrfs_delayed_ref_head *head;

			head = btrfs_delayed_node_to_head(ref);
			if (!mutex_trylock(&head->mutex)) {
				atomic_inc(&ref->refs);
				spin_unlock(&delayed_refs->lock);

				/* Need to wait for the delayed ref to run */
				mutex_lock(&head->mutex);
				mutex_unlock(&head->mutex);
				btrfs_put_delayed_ref(ref);

				spin_lock(&delayed_refs->lock);
				continue;
			}

			kfree(head->extent_op);
			delayed_refs->num_heads--;
			if (list_empty(&head->cluster))
				delayed_refs->num_heads_ready--;
			list_del_init(&head->cluster);
		}
		ref->in_tree = 0;
		rb_erase(&ref->rb_node, &delayed_refs->root);
		delayed_refs->num_entries--;

		spin_unlock(&delayed_refs->lock);
		btrfs_put_delayed_ref(ref);

		cond_resched();
		spin_lock(&delayed_refs->lock);
	}

	spin_unlock(&delayed_refs->lock);

	return ret;
}
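
/*
 * Editor's illustrative sketch, not part of the original file: the
 * trylock-else-wait idiom above, where acquiring and immediately
 * releasing a mutex is used purely to wait for its current holder.
 * Standalone user-space C with a POSIX mutex (build with -lpthread),
 * guarded by "#if 0" so it is never compiled here.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t head_mutex = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	if (pthread_mutex_trylock(&head_mutex) != 0) {
		/*
		 * Somebody is processing the entry: locking and then
		 * unlocking waits them out without doing any work.
		 */
		pthread_mutex_lock(&head_mutex);
	}
	pthread_mutex_unlock(&head_mutex);
	puts("entry is idle; safe to tear it down");
	return 0;
}
#endif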
static void btrfs_destroy_pending_snapshots(struct btrfs_transaction *t)
{
	struct btrfs_pending_snapshot *snapshot;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	list_splice_init(&t->pending_snapshots, &splice);

	while (!list_empty(&splice)) {
		snapshot = list_entry(splice.next,
				      struct btrfs_pending_snapshot,
				      list);

		list_del_init(&snapshot->list);

		kfree(snapshot);
	}
}
static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
{
	struct btrfs_inode *btrfs_inode;
	struct list_head splice;

	INIT_LIST_HEAD(&splice);

	spin_lock(&root->fs_info->delalloc_lock);
	list_splice_init(&root->fs_info->delalloc_inodes, &splice);

	while (!list_empty(&splice)) {
		btrfs_inode = list_entry(splice.next, struct btrfs_inode,
					 delalloc_inodes);

		list_del_init(&btrfs_inode->delalloc_inodes);

		btrfs_invalidate_inodes(btrfs_inode->root);
	}

	spin_unlock(&root->fs_info->delalloc_lock);
}
static int btrfs_destroy_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	u64 start = 0;
	u64 end;
	u64 offset;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			offset = page_offset(page);

			spin_lock(&dirty_pages->buffer_lock);
			eb = radix_tree_lookup(
			     &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
					       offset >> PAGE_CACHE_SHIFT);
			spin_unlock(&dirty_pages->buffer_lock);
			if (eb)
				ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
							 &eb->bflags);
			if (PageWriteback(page))
				end_page_writeback(page);

			lock_page(page);
			if (PageDirty(page)) {
				clear_page_dirty_for_io(page);
				spin_lock_irq(&page->mapping->tree_lock);
				radix_tree_tag_clear(&page->mapping->page_tree,
							page_index(page),
							PAGECACHE_TAG_DIRTY);
				spin_unlock_irq(&page->mapping->tree_lock);
			}

			unlock_page(page);
			page_cache_release(page);
		}
	}

	return ret;
}
static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
				       struct extent_io_tree *pinned_extents)
{
	struct extent_io_tree *unpin;
	u64 start;
	u64 end;
	int ret;
	bool loop = true;

	unpin = pinned_extents;
again:
	while (1) {
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;

		if (btrfs_test_opt(root, DISCARD))
			ret = btrfs_error_discard_extent(root, start,
							 end + 1 - start,
							 NULL);

		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		btrfs_error_unpin_extent_range(root, start, end);
		cond_resched();
	}

	if (loop) {
		if (unpin == &root->fs_info->freed_extents[0])
			unpin = &root->fs_info->freed_extents[1];
		else
			unpin = &root->fs_info->freed_extents[0];
		loop = false;
		goto again;
	}

	return 0;
}
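
/*
 * Editor's illustrative sketch, not part of the original file: the
 * ping-pong between the two freed_extents trees above, where each side
 * is drained exactly once.  Standalone user-space C, guarded by
 * "#if 0" so it is never compiled here.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int pinned[2] = { 3, 5 };	/* pending extents in each tree */
	int *unpin = &pinned[0];
	int pass;

	for (pass = 0; pass < 2; pass++) {	/* drain both trees once */
		*unpin = 0;
		unpin = (unpin == &pinned[0]) ? &pinned[1] : &pinned[0];
	}
	printf("left: %d %d\n", pinned[0], pinned[1]);
	return 0;
}
#endif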
void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
				   struct btrfs_root *root)
{
	btrfs_destroy_delayed_refs(cur_trans, root);
	btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
				cur_trans->dirty_pages.dirty_bytes);

	/* FIXME: cleanup wait for commit */
	cur_trans->in_commit = 1;
	cur_trans->blocked = 1;
	wake_up(&root->fs_info->transaction_blocked_wait);

	cur_trans->blocked = 0;
	wake_up(&root->fs_info->transaction_wait);

	cur_trans->commit_done = 1;
	wake_up(&cur_trans->commit_wait);

	btrfs_destroy_delayed_inodes(root);
	btrfs_assert_delayed_root_empty(root);

	btrfs_destroy_pending_snapshots(cur_trans);

	btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
				     EXTENT_DIRTY);
	btrfs_destroy_pinned_extent(root,
				    root->fs_info->pinned_extents);

	/*
	memset(cur_trans, 0, sizeof(*cur_trans));
	kmem_cache_free(btrfs_transaction_cachep, cur_trans);
	*/
}
int btrfs_cleanup_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *t;
	LIST_HEAD(list);

	mutex_lock(&root->fs_info->transaction_kthread_mutex);

	spin_lock(&root->fs_info->trans_lock);
	list_splice_init(&root->fs_info->trans_list, &list);
	root->fs_info->trans_no_join = 1;
	spin_unlock(&root->fs_info->trans_lock);

	while (!list_empty(&list)) {
		t = list_entry(list.next, struct btrfs_transaction, list);

		btrfs_destroy_ordered_operations(root);

		btrfs_destroy_ordered_extents(root);

		btrfs_destroy_delayed_refs(t, root);

		btrfs_block_rsv_release(root,
					&root->fs_info->trans_block_rsv,
					t->dirty_pages.dirty_bytes);

		/* FIXME: cleanup wait for commit */
		t->in_commit = 1;
		t->blocked = 1;
		if (waitqueue_active(&root->fs_info->transaction_blocked_wait))
			wake_up(&root->fs_info->transaction_blocked_wait);

		t->blocked = 0;
		if (waitqueue_active(&root->fs_info->transaction_wait))
			wake_up(&root->fs_info->transaction_wait);

		t->commit_done = 1;
		if (waitqueue_active(&t->commit_wait))
			wake_up(&t->commit_wait);

		btrfs_destroy_delayed_inodes(root);
		btrfs_assert_delayed_root_empty(root);

		btrfs_destroy_pending_snapshots(t);

		btrfs_destroy_delalloc_inodes(root);

		spin_lock(&root->fs_info->trans_lock);
		root->fs_info->running_transaction = NULL;
		spin_unlock(&root->fs_info->trans_lock);

		btrfs_destroy_marked_extents(root, &t->dirty_pages,
					     EXTENT_DIRTY);

		btrfs_destroy_pinned_extent(root,
					    root->fs_info->pinned_extents);

		atomic_set(&t->use_count, 0);
		list_del_init(&t->list);
		memset(t, 0, sizeof(*t));
		kmem_cache_free(btrfs_transaction_cachep, t);
	}

	spin_lock(&root->fs_info->trans_lock);
	root->fs_info->trans_no_join = 0;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->transaction_kthread_mutex);

	return 0;
}
static struct extent_io_ops btree_extent_io_ops = {
	.write_cache_pages_lock_hook = btree_lock_page_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.readpage_io_failed_hook = btree_io_failed_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};