/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/crc32c.h>
#include <linux/slab.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"
#include "tree-log.h"
#include "free-space-cache.h"
static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);
static void free_fs_root(struct btrfs_root *root);
/*
 * end_io_wq structs are used to do processing in task context when an IO is
 * complete.  This is used during reads to verify checksums, and it is used
 * by writes to insert metadata for new file extents after IO is complete.
 */
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};
/*
 * async submit bios are used to offload expensive checksumming
 * onto the worker threads.  They checksum file and metadata bios
 * just before they are sent down the IO stack.
 */
struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_start;
        extent_submit_bio_hook_t *submit_bio_done;
        int rw;
        int mirror_num;
        unsigned long bio_flags;
        /*
         * bio_offset is optional, can be used if the pages in the bio
         * can't tell us where in the file the bio should go
         */
        u64 bio_offset;
        struct btrfs_work work;
};
/*
 * These are used to set the lockdep class on the extent buffer locks.
 * The class is set by the readpage_end_io_hook after the buffer has
 * passed csum validation but before the pages are unlocked.
 *
 * The lockdep class is also set by btrfs_init_new_buffer on freshly
 * allocated blocks.
 *
 * The class is based on the level in the tree block, which allows lockdep
 * to know that lower nodes nest inside the locks of higher nodes.
 *
 * We also add a check to make sure the highest level of the tree is
 * the same as our lockdep setup here.  If BTRFS_MAX_LEVEL changes, this
 * code needs update as well.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
#  error
# endif
static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
        /* leaf */
        "btrfs-extent-00",
        "btrfs-extent-01",
        "btrfs-extent-02",
        "btrfs-extent-03",
        "btrfs-extent-04",
        "btrfs-extent-05",
        "btrfs-extent-06",
        "btrfs-extent-07",
        /* highest possible level */
        "btrfs-extent-08",
};
#endif
/*
 * extents on the btree inode are pretty simple, there's one extent
 * that covers the entire device
 */
static struct extent_map *btree_get_extent(struct inode *inode,
                struct page *page, size_t page_offset, u64 start, u64 len,
                int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                read_unlock(&em_tree->lock);
                goto out;
        }
        read_unlock(&em_tree->lock);

        em = alloc_extent_map(GFP_NOFS);
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        if (ret == -EEXIST) {
                u64 failed_start = em->start;
                u64 failed_len = em->len;

                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (em) {
                        ret = 0;
                } else {
                        em = lookup_extent_mapping(em_tree, failed_start,
                                                   failed_len);
                        ret = -EIO;
                }
        } else if (ret) {
                free_extent_map(em);
                em = NULL;
        }
        write_unlock(&em_tree->lock);

        if (ret)
                em = ERR_PTR(ret);
out:
        return em;
}
u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
        return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        *(__le32 *)result = ~cpu_to_le32(crc);
}
/*
 * compute the csum for a btree block, and either verify it or write it
 * into the csum field of the block.
 */
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        u16 csum_size =
                btrfs_super_csum_size(&root->fs_info->super_copy);
        char *result = NULL;
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *map_token = NULL;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;
        unsigned long inline_result;

        len = buf->len - offset;
        while (len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                        &map_token, &kaddr,
                                        &map_start, &map_len, KM_USER0);
                if (err)
                        return 1;
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(root, kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
                unmap_extent_buffer(buf, map_token, KM_USER0);
        }
        if (csum_size > sizeof(inline_result)) {
                result = kzalloc(csum_size * sizeof(char), GFP_NOFS);
                if (!result)
                        return 1;
        } else {
                result = (char *)&inline_result;
        }

        btrfs_csum_final(crc, result);

        if (verify) {
                if (memcmp_extent_buffer(buf, result, 0, csum_size)) {
                        u32 val;
                        u32 found = 0;

                        memcpy(&found, result, csum_size);

                        read_extent_buffer(buf, &val, 0, csum_size);
                        if (printk_ratelimit()) {
                                printk(KERN_INFO "btrfs: %s checksum verify "
                                       "failed on %llu wanted %X found %X "
                                       "level %d\n",
                                       root->fs_info->sb->s_id,
                                       (unsigned long long)buf->start,
                                       val, found,
                                       btrfs_header_level(buf));
                        }
                        if (result != (char *)&inline_result)
                                kfree(result);
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, csum_size);
        }
        if (result != (char *)&inline_result)
                kfree(result);
        return 0;
}
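/*
 * Layout note (a gloss on the function above): the first BTRFS_CSUM_SIZE
 * bytes of every tree block are reserved for the checksum, which is why
 * the hashing loop starts at offset BTRFS_CSUM_SIZE while the result is
 * compared or written at offset 0.  With the default crc32c only four of
 * those bytes are used; csum_size comes from the superblock, so larger
 * checksums would fit in the same slot.
 */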
/*
 * we can't consider a given block up to date unless the transid of the
 * block matches the transid in the parent node's pointer.  This is how we
 * detect blocks that either didn't get written at all or got written
 * in the wrong place.
 */
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid)
{
        struct extent_state *cached_state = NULL;
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1,
                         0, &cached_state, GFP_NOFS);
        if (extent_buffer_uptodate(io_tree, eb, cached_state) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        if (printk_ratelimit()) {
                printk("parent transid verify failed on %llu wanted %llu "
                       "found %llu\n",
                       (unsigned long long)eb->start,
                       (unsigned long long)parent_transid,
                       (unsigned long long)btrfs_header_generation(eb));
        }
        ret = 1;
        clear_extent_buffer_uptodate(io_tree, eb, &cached_state);
out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
        return ret;
}
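/*
 * A failed transid check above also clears the buffer's uptodate bit, so
 * the retry loop in btree_read_extent_buffer_pages() below moves on to
 * the next mirror instead of trusting a stale or misplaced copy.
 */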
/*
 * helper to read a given tree block, doing retries as required when
 * the checksums don't match and we have alternate mirrors to try.
 */
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;

        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start, 1,
                                               btree_get_extent, mirror_num);
                if (!ret &&
                    !verify_parent_transid(io_tree, eb, parent_transid))
                        return ret;

                num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        return ret;

                mirror_num++;
                if (mirror_num > num_copies)
                        return ret;
        }
        return -EIO;
}
/*
 * checksum a dirty tree block before IO.  This has extra checks to make sure
 * we only fill in the checksum field in the first page of a multi-page block
 */
static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        int ret;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;
        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
        if (eb == NULL) {
                WARN_ON(1);
                goto out;
        }
        ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
                                             btrfs_header_generation(eb));
        BUG_ON(ret);
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                WARN_ON(1);
                goto err;
        }
        if (eb->first_page != page) {
                WARN_ON(1);
                goto err;
        }
        if (!PageUptodate(page)) {
                WARN_ON(1);
                goto err;
        }
        found_level = btrfs_header_level(eb);

        csum_tree_block(root, eb, 0);
err:
        free_extent_buffer(eb);
out:
        return 0;
}
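/*
 * Note on the page->private checks above: in this kernel's extent_io code
 * the extent buffer length is stored in page->private shifted left by two,
 * with the low bits kept for private flags (EXTENT_PAGE_PRIVATE is the
 * bare marker on pages carrying no buffer length).  That is why len is
 * recovered with "page->private >> 2" here and again in
 * btree_readpage_end_io_hook() below.
 */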
static int check_tree_block_fsid(struct btrfs_root *root,
                                 struct extent_buffer *eb)
{
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        u8 fsid[BTRFS_UUID_SIZE];
        int ret = 1;

        read_extent_buffer(eb, fsid, (unsigned long)btrfs_header_fsid(eb),
                           BTRFS_FSID_SIZE);
        while (fs_devices) {
                if (!memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE)) {
                        ret = 0;
                        break;
                }
                fs_devices = fs_devices->seed;
        }
        return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
        lockdep_set_class_and_name(&eb->lock,
                                   &btrfs_eb_class[level],
                                   btrfs_eb_name[level]);
}
#endif
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;

        len = page->private >> 2;
        WARN_ON(len == 0);

        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
        if (eb == NULL) {
                ret = -EIO;
                goto out;
        }

        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "btrfs bad tree block start "
                               "%llu %llu\n",
                               (unsigned long long)found_start,
                               (unsigned long long)eb->start);
                }
                ret = -EIO;
                goto err;
        }
        if (eb->first_page != page) {
                printk(KERN_INFO "btrfs bad first page %lu %lu\n",
                       eb->first_page->index, page->index);
                WARN_ON(1);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "btrfs bad fsid on block %llu\n",
                               (unsigned long long)eb->start);
                }
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);

        btrfs_set_buffer_lockdep_class(eb, found_level);

        ret = csum_tree_block(root, eb, 1);
        if (ret)
                ret = -EIO;

        end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
        end = eb->start + end - 1;
err:
        free_extent_buffer(eb);
out:
        return ret;
}
static void end_workqueue_bio(struct bio *bio, int err)
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;

        if (bio->bi_rw & REQ_WRITE) {
                if (end_io_wq->metadata == 1)
                        btrfs_queue_worker(&fs_info->endio_meta_write_workers,
                                           &end_io_wq->work);
                else if (end_io_wq->metadata == 2)
                        btrfs_queue_worker(&fs_info->endio_freespace_worker,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_write_workers,
                                           &end_io_wq->work);
        } else {
                if (end_io_wq->metadata)
                        btrfs_queue_worker(&fs_info->endio_meta_workers,
                                           &end_io_wq->work);
                else
                        btrfs_queue_worker(&fs_info->endio_workers,
                                           &end_io_wq->work);
        }
}
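/*
 * To summarize the routing above: write completions fan out by the
 * metadata value recorded in btrfs_bio_wq_end_io() (1 = metadata,
 * 2 = free space cache, anything else = data), while reads only
 * distinguish metadata from data.  Each queue has its own worker pool,
 * so expensive read verification and write finishing don't contend for
 * the same threads.
 */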
/*
 * For the metadata arg you want
 *
 * 0 - if data
 * 1 - if normal metadata
 * 2 - if writing to the free space cache area
 */
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;

        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}
unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
{
        unsigned long limit = min_t(unsigned long,
                                    info->workers.max_workers,
                                    info->fs_devices->open_devices);
        return 256 * limit;
}

int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
{
        return atomic_read(&info->nr_async_bios) >
                btrfs_async_submit_limit(info);
}
static void run_one_async_start(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;
        async->submit_bio_start(async->inode, async->rw, async->bio,
                                async->mirror_num, async->bio_flags,
                                async->bio_offset);
}

static void run_one_async_done(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;
        int limit;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;

        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

        atomic_dec(&fs_info->nr_async_submits);

        if (atomic_read(&fs_info->nr_async_submits) < limit &&
            waitqueue_active(&fs_info->async_submit_wait))
                wake_up(&fs_info->async_submit_wait);

        async->submit_bio_done(async->inode, async->rw, async->bio,
                               async->mirror_num, async->bio_flags,
                               async->bio_offset);
}

static void run_one_async_free(struct btrfs_work *work)
{
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        kfree(async);
}
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        unsigned long bio_flags,
                        u64 bio_offset,
                        extent_submit_bio_hook_t *submit_bio_start,
                        extent_submit_bio_hook_t *submit_bio_done)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_start = submit_bio_start;
        async->submit_bio_done = submit_bio_done;

        async->work.func = run_one_async_start;
        async->work.ordered_func = run_one_async_done;
        async->work.ordered_free = run_one_async_free;

        async->work.flags = 0;
        async->bio_flags = bio_flags;
        async->bio_offset = bio_offset;

        atomic_inc(&fs_info->nr_async_submits);

        if (rw & REQ_SYNC)
                btrfs_set_work_high_prio(&async->work);

        btrfs_queue_worker(&fs_info->workers, &async->work);

        while (atomic_read(&fs_info->async_submit_draining) &&
              atomic_read(&fs_info->nr_async_submits)) {
                wait_event(fs_info->async_submit_wait,
                           (atomic_read(&fs_info->nr_async_submits) == 0));
        }

        return 0;
}
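/*
 * The three work callbacks wired up above rely on the ordered queues in
 * async-thread.c: work.func (run_one_async_start) may run on any worker
 * thread in parallel, but ordered_func (run_one_async_done) is called
 * back in the order the works were queued, and ordered_free runs last.
 * That lets checksumming proceed on all CPUs while bios still reach
 * btrfs_map_bio() in submission order.
 */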
static int btree_csum_one_bio(struct bio *bio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        struct btrfs_root *root;

        WARN_ON(bio->bi_vcnt <= 0);
        while (bio_index < bio->bi_vcnt) {
                root = BTRFS_I(bvec->bv_page->mapping->host)->root;
                csum_dirty_buffer(root, bvec->bv_page);
                bio_index++;
                bvec++;
        }
        return 0;
}
static int __btree_submit_bio_start(struct inode *inode, int rw,
                                    struct bio *bio, int mirror_num,
                                    unsigned long bio_flags,
                                    u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        btree_csum_one_bio(bio);
        return 0;
}

static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num, unsigned long bio_flags,
                                 u64 bio_offset)
{
        int ret;

        ret = btrfs_bio_wq_end_io(BTRFS_I(inode)->root->fs_info,
                                  bio, 1);
        BUG_ON(ret);

        if (!(rw & REQ_WRITE)) {
                /*
                 * called for a read, do the setup so that checksum validation
                 * can happen in the async kernel threads
                 */
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 0);
        }

        /*
         * kthread helpers are used to submit writes so that checksumming
         * can happen in parallel across all CPUs
         */
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num, 0,
                                   bio_offset,
                                   __btree_submit_bio_start,
                                   __btree_submit_bio_done);
}
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct extent_buffer *eb;
        int was_dirty;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (!(current->flags & PF_MEMALLOC)) {
                return extent_write_full_page(tree, page,
                                              btree_get_extent, wbc);
        }

        redirty_page_for_writepage(wbc, page);
        eb = btrfs_find_tree_block(root, page_offset(page),
                                   PAGE_CACHE_SIZE);
        WARN_ON(!eb);

        was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
        if (!was_dirty) {
                spin_lock(&root->fs_info->delalloc_lock);
                root->fs_info->dirty_metadata_bytes += PAGE_CACHE_SIZE;
                spin_unlock(&root->fs_info->delalloc_lock);
        }
        free_extent_buffer(eb);

        unlock_page(page);
        return 0;
}
static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
                struct btrfs_root *root = BTRFS_I(mapping->host)->root;
                u64 num_dirty;
                unsigned long thresh = 32 * 1024 * 1024;

                if (wbc->for_kupdate)
                        return 0;

                /* this is a bit racy, but that's ok */
                num_dirty = root->fs_info->dirty_metadata_bytes;
                if (num_dirty < thresh)
                        return 0;
        }
        return extent_writepages(tree, mapping, btree_get_extent, wbc);
}
static int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent);
}
static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        struct extent_io_tree *tree;
        struct extent_map_tree *map;
        int ret;

        if (PageWriteback(page) || PageDirty(page))
                return 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        map = &BTRFS_I(page->mapping->host)->extent_tree;

        ret = try_release_extent_state(map, tree, page, gfp_flags);
        if (!ret)
                return 0;

        ret = try_release_extent_buffer(tree, page);
        if (ret) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }

        return ret;
}
static void btree_invalidatepage(struct page *page, unsigned long offset)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                printk(KERN_WARNING "btrfs warning page private not zero "
                       "on page %llu\n", (unsigned long long)page_offset(page));
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}
static const struct address_space_operations btree_aops = {
        .readpage       = btree_readpage,
        .writepage      = btree_writepage,
        .writepages     = btree_writepages,
        .releasepage    = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
        .sync_page      = block_sync_page,
};
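/*
 * These address_space ops are installed on the btree inode's mapping in
 * open_ctree(), so all metadata readahead and writeback funnels through
 * the checksumming and verification hooks above.
 */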
int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, 0, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}
struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize, GFP_NOFS);
        return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize, NULL, GFP_NOFS);
        return eb;
}
int btrfs_write_tree_block(struct extent_buffer *buf)
{
        return filemap_fdatawrite_range(buf->first_page->mapping, buf->start,
                                        buf->start + buf->len - 1);
}

int btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
        return filemap_fdatawait_range(buf->first_page->mapping,
                                       buf->start, buf->start + buf->len - 1);
}
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree;
        int ret;

        io_tree = &BTRFS_I(btree_inode)->io_tree;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

        if (ret == 0)
                set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
        return buf;
}
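/*
 * Callers can test EXTENT_BUFFER_UPTODATE in buf->bflags to tell whether
 * the read (including the parent transid check) actually succeeded;
 * read_tree_block() hands back the buffer either way.
 */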
int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct extent_buffer *buf)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        if (btrfs_header_generation(buf) ==
            root->fs_info->running_transaction->transid) {
                btrfs_assert_tree_locked(buf);

                if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &buf->bflags)) {
                        spin_lock(&root->fs_info->delalloc_lock);
                        if (root->fs_info->dirty_metadata_bytes >= buf->len)
                                root->fs_info->dirty_metadata_bytes -= buf->len;
                        else
                                WARN_ON(1);
                        spin_unlock(&root->fs_info->delalloc_lock);
                }

                /* ugh, clear_extent_buffer_dirty needs to lock the page */
                btrfs_set_lock_blocking(buf);
                clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
        }
        return 0;
}
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                        u32 stripesize, struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;
        root->in_radix = 0;
        root->orphan_item_inserted = 0;
        root->orphan_cleanup_state = 0;

        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
        root->name = NULL;
        root->in_sysfs = 0;
        root->inode_tree = RB_ROOT;
        root->block_rsv = NULL;
        root->orphan_block_rsv = NULL;

        INIT_LIST_HEAD(&root->dirty_list);
        INIT_LIST_HEAD(&root->orphan_list);
        INIT_LIST_HEAD(&root->root_list);
        spin_lock_init(&root->node_lock);
        spin_lock_init(&root->orphan_lock);
        spin_lock_init(&root->inode_lock);
        spin_lock_init(&root->accounting_lock);
        mutex_init(&root->objectid_mutex);
        mutex_init(&root->log_mutex);
        init_waitqueue_head(&root->log_writer_wait);
        init_waitqueue_head(&root->log_commit_wait[0]);
        init_waitqueue_head(&root->log_commit_wait[1]);
        atomic_set(&root->log_commit[0], 0);
        atomic_set(&root->log_commit[1], 0);
        atomic_set(&root->log_writers, 0);
        root->log_batch = 0;
        root->log_transid = 0;
        root->last_log_commit = 0;
        extent_io_tree_init(&root->dirty_log_pages,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);

        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->root_key.objectid = objectid;
        root->anon_super.s_root = NULL;
        root->anon_super.s_dev = 0;
        INIT_LIST_HEAD(&root->anon_super.s_list);
        INIT_LIST_HEAD(&root->anon_super.s_instances);
        init_rwsem(&root->anon_super.s_umount);

        return 0;
}
static int find_and_setup_root(struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;
        u32 blocksize;
        u64 generation;

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        if (ret > 0)
                return -ENOENT;
        BUG_ON(ret);

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        BUG_ON(!root->node);
        root->commit_root = btrfs_root_node(root);
        return 0;
}
static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, BTRFS_TREE_LOG_OBJECTID);

        root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID;
        root->root_key.type = BTRFS_ROOT_ITEM_KEY;
        root->root_key.offset = BTRFS_TREE_LOG_OBJECTID;
        /*
         * log trees do not get reference counted because they go away
         * before a real commit is actually done.  They do store pointers
         * to file data extents, and those reference counts still get
         * updated (along with back refs to the log tree).
         */
        root->ref_cows = 0;

        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                      BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
        }

        memset_extent_buffer(leaf, 0, 0, sizeof(struct btrfs_header));
        btrfs_set_header_bytenr(leaf, leaf->start);
        btrfs_set_header_generation(leaf, trans->transid);
        btrfs_set_header_backref_rev(leaf, BTRFS_MIXED_BACKREF_REV);
        btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
        root->node = leaf;

        write_extent_buffer(root->node, root->fs_info->fsid,
                            (unsigned long)btrfs_header_fsid(root->node),
                            BTRFS_FSID_SIZE);
        btrfs_mark_buffer_dirty(root->node);
        btrfs_tree_unlock(root->node);
        return root;
}
int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *log_root;

        log_root = alloc_log_tree(trans, fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);
        WARN_ON(fs_info->log_root_tree);
        fs_info->log_root_tree = log_root;
        return 0;
}
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
                       struct btrfs_root *root)
{
        struct btrfs_root *log_root;
        struct btrfs_inode_item *inode_item;

        log_root = alloc_log_tree(trans, root->fs_info);
        if (IS_ERR(log_root))
                return PTR_ERR(log_root);

        log_root->last_trans = trans->transid;
        log_root->root_key.offset = root->root_key.objectid;

        inode_item = &log_root->root_item.inode;
        inode_item->generation = cpu_to_le64(1);
        inode_item->size = cpu_to_le64(3);
        inode_item->nlink = cpu_to_le32(1);
        inode_item->nbytes = cpu_to_le64(root->leafsize);
        inode_item->mode = cpu_to_le32(S_IFDIR | 0755);

        btrfs_set_root_node(&log_root->root_item, log_root->node);

        WARN_ON(root->log_root);
        root->log_root = log_root;
        root->log_transid = 0;
        root->last_log_commit = 0;
        return 0;
}
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_fs_info *fs_info = tree_root->fs_info;
        struct btrfs_path *path;
        struct extent_buffer *l;
        u64 generation;
        u32 blocksize;
        int ret = 0;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto out;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, location->objectid);

        path = btrfs_alloc_path();
        BUG_ON(!path);
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret == 0) {
                l = path->nodes[0];
                read_extent_buffer(l, &root->root_item,
                                btrfs_item_ptr_offset(l, path->slots[0]),
                                sizeof(root->root_item));
                memcpy(&root->root_key, location, sizeof(*location));
        }
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                if (ret > 0)
                        ret = -ENOENT;
                return ERR_PTR(ret);
        }

        generation = btrfs_root_generation(&root->root_item);
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, generation);
        root->commit_root = btrfs_root_node(root);
        BUG_ON(!root->node);
out:
        if (location->objectid != BTRFS_TREE_LOG_OBJECTID)
                root->ref_cows = 1;

        return root;
}
struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
                                        u64 root_objectid)
{
        struct btrfs_root *root;

        if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)root_objectid);
        return root;
}
struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location)
{
        struct btrfs_root *root;
        int ret;

        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
                return fs_info->chunk_root;
        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
                return fs_info->dev_root;
        if (location->objectid == BTRFS_CSUM_TREE_OBJECTID)
                return fs_info->csum_root;
again:
        spin_lock(&fs_info->fs_roots_radix_lock);
        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        spin_unlock(&fs_info->fs_roots_radix_lock);
        if (root)
                return root;

        root = btrfs_read_fs_root_no_radix(fs_info->tree_root, location);
        if (IS_ERR(root))
                return root;

        set_anon_super(&root->anon_super, NULL);

        if (btrfs_root_refs(&root->root_item) == 0) {
                ret = -ENOENT;
                goto fail;
        }

        ret = btrfs_find_orphan_item(fs_info->tree_root, location->objectid);
        if (ret < 0)
                goto fail;
        if (ret == 0)
                root->orphan_item_inserted = 1;

        ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
        if (ret)
                goto fail;

        spin_lock(&fs_info->fs_roots_radix_lock);
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret == 0)
                root->in_radix = 1;
        spin_unlock(&fs_info->fs_roots_radix_lock);
        radix_tree_preload_end();
        if (ret) {
                if (ret == -EEXIST) {
                        free_fs_root(root);
                        goto again;
                }
                goto fail;
        }

        ret = btrfs_find_dead_roots(fs_info->tree_root,
                                    root->root_key.objectid);
        WARN_ON(ret);
        return root;
fail:
        free_fs_root(root);
        return ERR_PTR(ret);
}
struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                                      struct btrfs_key *location,
                                      const char *name, int namelen)
{
        return btrfs_read_fs_root_no_name(fs_info, location);
#if 0
        struct btrfs_root *root;
        int ret;

        root = btrfs_read_fs_root_no_name(fs_info, location);
        if (!root)
                return NULL;

        if (root->in_sysfs)
                return root;

        ret = btrfs_set_root_name(root, name, namelen);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }

        ret = btrfs_sysfs_add_root(root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root->name);
                kfree(root);
                return ERR_PTR(ret);
        }
        root->in_sysfs = 1;
        return root;
#endif
}
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;

        list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi && bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
        }
        return ret;
}
/*
 * this unplugs every device on the box, and it is only used when page
 * is null
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct btrfs_device *device;
        struct btrfs_fs_info *info;

        info = (struct btrfs_fs_info *)bdi->unplug_io_data;
        list_for_each_entry(device, &info->fs_devices->devices, dev_list) {
                if (!device->bdev)
                        continue;

                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi->unplug_io_fn)
                        bdi->unplug_io_fn(bdi, page);
        }
}
static void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct inode *inode;
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct address_space *mapping;
        u64 offset;

        /* the generic O_DIRECT read code does this */
        if (!page) {
                __unplug_io_fn(bdi, page);
                return;
        }

        /*
         * page->mapping may change at any time.  Get a consistent copy
         * and use that for everything below
         */
        smp_mb();
        mapping = page->mapping;
        if (!mapping)
                return;

        inode = mapping->host;

        /*
         * don't do the expensive searching for a small number of
         * devices
         */
        if (BTRFS_I(inode)->root->fs_info->fs_devices->open_devices <= 2) {
                __unplug_io_fn(bdi, page);
                return;
        }

        offset = page_offset(page);

        em_tree = &BTRFS_I(inode)->extent_tree;
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
        read_unlock(&em_tree->lock);
        if (!em) {
                __unplug_io_fn(bdi, page);
                return;
        }

        if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
                free_extent_map(em);
                __unplug_io_fn(bdi, page);
                return;
        }
        offset = offset - em->start;
        btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
                          em->block_start + offset, page);
        free_extent_map(em);
}
/*
 * If this fails, caller must call bdi_destroy() to get rid of the
 * bdi again.
 */
static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
        int err;

        bdi->capabilities = BDI_CAP_MAP_COPY;
        err = bdi_setup_and_register(bdi, "btrfs", BDI_CAP_MAP_COPY);
        if (err)
                return err;

        bdi->ra_pages = default_backing_dev_info.ra_pages;
        bdi->unplug_io_fn = btrfs_unplug_io_fn;
        bdi->unplug_io_data = info;
        bdi->congested_fn = btrfs_congested_fn;
        bdi->congested_data = info;
        return 0;
}
static int bio_ready_for_csum(struct bio *bio)
{
        u64 length = 0;
        u64 buf_len = 0;
        u64 start = 0;
        struct page *page;
        struct extent_io_tree *io_tree = NULL;
        struct btrfs_fs_info *info = NULL;
        struct bio_vec *bvec;
        int i;
        int ret;

        bio_for_each_segment(bvec, bio, i) {
                page = bvec->bv_page;
                if (page->private == EXTENT_PAGE_PRIVATE) {
                        length += bvec->bv_len;
                        continue;
                }
                if (!page->private) {
                        length += bvec->bv_len;
                        continue;
                }
                length = bvec->bv_len;
                buf_len = page->private >> 2;
                start = page_offset(page) + bvec->bv_offset;
                io_tree = &BTRFS_I(page->mapping->host)->io_tree;
                info = BTRFS_I(page->mapping->host)->root->fs_info;
        }
        /* are we fully contained in this bio? */
        if (buf_len <= length)
                return 1;

        ret = extent_range_uptodate(io_tree, start + length,
                                    start + buf_len - 1);
        return ret;
}
/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
        struct bio *bio;
        struct end_io_wq *end_io_wq;
        struct btrfs_fs_info *fs_info;
        int error;

        end_io_wq = container_of(work, struct end_io_wq, work);
        bio = end_io_wq->bio;
        fs_info = end_io_wq->info;

        /* metadata bio reads are special because the whole tree block must
         * be checksummed at once.  This makes sure the entire block is in
         * ram and up to date before trying to verify things.  For
         * blocksize <= pagesize, it is basically a noop
         */
        if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata &&
            !bio_ready_for_csum(bio)) {
                btrfs_queue_worker(&fs_info->endio_meta_workers,
                                   &end_io_wq->work);
                return;
        }
        error = end_io_wq->error;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
        bio_endio(bio, error);
}
static int cleaner_kthread(void *arg)
{
        struct btrfs_root *root = arg;

        do {
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);

                if (!(root->fs_info->sb->s_flags & MS_RDONLY) &&
                    mutex_trylock(&root->fs_info->cleaner_mutex)) {
                        btrfs_run_delayed_iputs(root);
                        btrfs_clean_old_snapshots(root);
                        mutex_unlock(&root->fs_info->cleaner_mutex);
                }

                if (freezing(current)) {
                        refrigerator();
                } else {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop())
                                schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
static int transaction_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_trans_handle *trans;
        struct btrfs_transaction *cur;
        u64 transid;
        unsigned long now;
        unsigned long delay;
        int ret;

        do {
                delay = HZ * 30;
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->transaction_kthread_mutex);

                spin_lock(&root->fs_info->new_trans_lock);
                cur = root->fs_info->running_transaction;
                if (!cur) {
                        spin_unlock(&root->fs_info->new_trans_lock);
                        goto sleep;
                }

                now = get_seconds();
                if (!cur->blocked &&
                    (now < cur->start_time || now - cur->start_time < 30)) {
                        spin_unlock(&root->fs_info->new_trans_lock);
                        delay = HZ * 5;
                        goto sleep;
                }
                transid = cur->transid;
                spin_unlock(&root->fs_info->new_trans_lock);

                trans = btrfs_join_transaction(root, 1);
                if (transid == trans->transid) {
                        ret = btrfs_commit_transaction(trans, root);
                        BUG_ON(ret);
                } else {
                        btrfs_end_transaction(trans, root);
                }
sleep:
                wake_up_process(root->fs_info->cleaner_kthread);
                mutex_unlock(&root->fs_info->transaction_kthread_mutex);

                if (freezing(current)) {
                        refrigerator();
                } else {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (!kthread_should_stop() &&
                            !btrfs_transaction_blocked(root->fs_info))
                                schedule_timeout(delay);
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}
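/*
 * Net effect of the timing logic above: an open transaction is committed
 * once it is roughly 30 seconds old or once it is marked blocked; while
 * a younger transaction exists the thread re-polls every 5 seconds, and
 * otherwise it sleeps for the full 30 second delay.
 */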
struct btrfs_root *open_ctree(struct super_block *sb,
                              struct btrfs_fs_devices *fs_devices,
                              char *options)
{
        u32 sectorsize;
        u32 nodesize;
        u32 leafsize;
        u32 blocksize;
        u32 stripesize;
        u64 generation;
        u64 features;
        struct btrfs_key location;
        struct buffer_head *bh;
        struct btrfs_root *extent_root = kzalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
                                                GFP_NOFS);
        struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
                                                GFP_NOFS);
        struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
                                              GFP_NOFS);
        struct btrfs_root *log_tree_root;
        int ret;
        int err = -EINVAL;
        struct btrfs_super_block *disk_super;

        if (!extent_root || !tree_root || !fs_info ||
            !chunk_root || !dev_root || !csum_root) {
                err = -ENOMEM;
                goto fail;
        }

        ret = init_srcu_struct(&fs_info->subvol_srcu);
        if (ret) {
                err = ret;
                goto fail;
        }

        ret = setup_bdi(fs_info, &fs_info->bdi);
        if (ret) {
                err = ret;
                goto fail_srcu;
        }

        fs_info->btree_inode = new_inode(sb);
        if (!fs_info->btree_inode) {
                err = -ENOMEM;
                goto fail_bdi;
        }
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        INIT_LIST_HEAD(&fs_info->delayed_iputs);
        INIT_LIST_HEAD(&fs_info->hashers);
        INIT_LIST_HEAD(&fs_info->delalloc_inodes);
        INIT_LIST_HEAD(&fs_info->ordered_operations);
        INIT_LIST_HEAD(&fs_info->caching_block_groups);
        spin_lock_init(&fs_info->delalloc_lock);
        spin_lock_init(&fs_info->new_trans_lock);
        spin_lock_init(&fs_info->ref_cache_lock);
        spin_lock_init(&fs_info->fs_roots_radix_lock);
        spin_lock_init(&fs_info->delayed_iput_lock);

        init_completion(&fs_info->kobj_unregister);
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->csum_root = csum_root;
        fs_info->chunk_root = chunk_root;
        fs_info->dev_root = dev_root;
        fs_info->fs_devices = fs_devices;
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
        INIT_LIST_HEAD(&fs_info->space_info);
        btrfs_mapping_init(&fs_info->mapping_tree);
        btrfs_init_block_rsv(&fs_info->global_block_rsv);
        btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
        btrfs_init_block_rsv(&fs_info->trans_block_rsv);
        btrfs_init_block_rsv(&fs_info->chunk_block_rsv);
        btrfs_init_block_rsv(&fs_info->empty_block_rsv);
        INIT_LIST_HEAD(&fs_info->durable_block_rsv_list);
        mutex_init(&fs_info->durable_block_rsv_mutex);
        atomic_set(&fs_info->nr_async_submits, 0);
        atomic_set(&fs_info->async_delalloc_pages, 0);
        atomic_set(&fs_info->async_submit_draining, 0);
        atomic_set(&fs_info->nr_async_bios, 0);
        fs_info->sb = sb;
        fs_info->max_inline = 8192 * 1024;
        fs_info->metadata_ratio = 0;

        fs_info->thread_pool_size = min_t(unsigned long,
                                          num_online_cpus() + 2, 8);

        INIT_LIST_HEAD(&fs_info->ordered_extents);
        spin_lock_init(&fs_info->ordered_extent_lock);
        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);
        sb->s_bdi = &fs_info->bdi;

        fs_info->btree_inode->i_ino = BTRFS_BTREE_INODE_OBJECTID;
        fs_info->btree_inode->i_nlink = 1;
        /*
         * we set the i_size on the btree inode to the max possible int.
         * the real end of the address space is determined by all of
         * the devices in the system
         */
        fs_info->btree_inode->i_size = OFFSET_MAX;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

        RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node);
        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
                            fs_info->btree_inode->i_mapping,
                            GFP_NOFS);
        extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
                             GFP_NOFS);

        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
        BTRFS_I(fs_info->btree_inode)->dummy_inode = 1;
        insert_inode_hash(fs_info->btree_inode);
        spin_lock_init(&fs_info->block_group_cache_lock);
        fs_info->block_group_cache_tree = RB_ROOT;

        extent_io_tree_init(&fs_info->freed_extents[0],
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->freed_extents[1],
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        fs_info->pinned_extents = &fs_info->freed_extents[0];
        fs_info->do_barriers = 1;

        mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->ordered_operations_mutex);
        mutex_init(&fs_info->tree_log_mutex);
        mutex_init(&fs_info->chunk_mutex);
        mutex_init(&fs_info->transaction_kthread_mutex);
        mutex_init(&fs_info->cleaner_mutex);
        mutex_init(&fs_info->volume_mutex);
        init_rwsem(&fs_info->extent_commit_sem);
        init_rwsem(&fs_info->cleanup_work_sem);
        init_rwsem(&fs_info->subvol_sem);

        btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
        btrfs_init_free_cluster(&fs_info->data_alloc_cluster);

        init_waitqueue_head(&fs_info->transaction_throttle);
        init_waitqueue_head(&fs_info->transaction_wait);
        init_waitqueue_head(&fs_info->async_submit_wait);
        __setup_root(4096, 4096, 4096, 4096, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);

        bh = btrfs_read_dev_super(fs_devices->latest_bdev);
        if (!bh) {
                err = -EINVAL;
                goto fail_iput;
        }

        memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
        memcpy(&fs_info->super_for_commit, &fs_info->super_copy,
               sizeof(fs_info->super_for_commit));
        brelse(bh);

        memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

        disk_super = &fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                goto fail_iput;

        ret = btrfs_parse_options(tree_root, options);
        if (ret) {
                err = ret;
                goto fail_iput;
        }

        features = btrfs_super_incompat_flags(disk_super) &
                ~BTRFS_FEATURE_INCOMPAT_SUPP;
        if (features) {
                printk(KERN_ERR "BTRFS: couldn't mount because of "
                       "unsupported optional features (%Lx).\n",
                       (unsigned long long)features);
                err = -EINVAL;
                goto fail_iput;
        }

        features = btrfs_super_incompat_flags(disk_super);
        if (!(features & BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF)) {
                features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
                btrfs_set_super_incompat_flags(disk_super, features);
        }

        features = btrfs_super_compat_ro_flags(disk_super) &
                ~BTRFS_FEATURE_COMPAT_RO_SUPP;
        if (!(sb->s_flags & MS_RDONLY) && features) {
                printk(KERN_ERR "BTRFS: couldn't mount RDWR because of "
                       "unsupported option features (%Lx).\n",
                       (unsigned long long)features);
                err = -EINVAL;
                goto fail_iput;
        }
        btrfs_init_workers(&fs_info->generic_worker,
                           "genwork", 1, NULL);

        btrfs_init_workers(&fs_info->workers, "worker",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);

        btrfs_init_workers(&fs_info->delalloc_workers, "delalloc",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);

        btrfs_init_workers(&fs_info->submit_workers, "submit",
                           min_t(u64, fs_devices->num_devices,
                           fs_info->thread_pool_size),
                           &fs_info->generic_worker);

        /* a higher idle thresh on the submit workers makes it much more
         * likely that bios will be send down in a sane order to the
         * devices
         */
        fs_info->submit_workers.idle_thresh = 64;

        fs_info->workers.idle_thresh = 16;
        fs_info->workers.ordered = 1;

        fs_info->delalloc_workers.idle_thresh = 2;
        fs_info->delalloc_workers.ordered = 1;

        btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_workers, "endio",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_meta_workers, "endio-meta",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_meta_write_workers,
                           "endio-meta-write", fs_info->thread_pool_size,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
                           fs_info->thread_pool_size,
                           &fs_info->generic_worker);
        btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write",
                           1, &fs_info->generic_worker);

        /*
         * endios are largely parallel and should have a very
         * low idle thresh
         */
        fs_info->endio_workers.idle_thresh = 4;
        fs_info->endio_meta_workers.idle_thresh = 4;

        fs_info->endio_write_workers.idle_thresh = 2;
        fs_info->endio_meta_write_workers.idle_thresh = 2;

        btrfs_start_workers(&fs_info->workers, 1);
        btrfs_start_workers(&fs_info->generic_worker, 1);
        btrfs_start_workers(&fs_info->submit_workers, 1);
        btrfs_start_workers(&fs_info->delalloc_workers, 1);
        btrfs_start_workers(&fs_info->fixup_workers, 1);
        btrfs_start_workers(&fs_info->endio_workers, 1);
        btrfs_start_workers(&fs_info->endio_meta_workers, 1);
        btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
        btrfs_start_workers(&fs_info->endio_write_workers, 1);
        btrfs_start_workers(&fs_info->endio_freespace_worker, 1);

        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
        fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
                                    4 * 1024 * 1024 / PAGE_CACHE_SIZE);
        nodesize = btrfs_super_nodesize(disk_super);
        leafsize = btrfs_super_leafsize(disk_super);
        sectorsize = btrfs_super_sectorsize(disk_super);
        stripesize = btrfs_super_stripesize(disk_super);
        tree_root->nodesize = nodesize;
        tree_root->leafsize = leafsize;
        tree_root->sectorsize = sectorsize;
        tree_root->stripesize = stripesize;

        sb->s_blocksize = sectorsize;
        sb->s_blocksize_bits = blksize_bits(sectorsize);

        if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic))) {
                printk(KERN_INFO "btrfs: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }

        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_sys_array(tree_root);
        mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
                printk(KERN_WARNING "btrfs: failed to read the system "
                       "array on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }
        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_chunk_root_level(disk_super));
        generation = btrfs_super_chunk_root_generation(disk_super);

        __setup_root(nodesize, leafsize, sectorsize, stripesize,
                     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

        chunk_root->node = read_tree_block(chunk_root,
                                           btrfs_super_chunk_root(disk_super),
                                           blocksize, generation);
        BUG_ON(!chunk_root->node);
        if (!test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
                printk(KERN_WARNING "btrfs: failed to read chunk root on %s\n",
                       sb->s_id);
                goto fail_chunk_root;
        }
        btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
        chunk_root->commit_root = btrfs_root_node(chunk_root);

        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
           (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
           BTRFS_UUID_SIZE);

        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_chunk_tree(chunk_root);
        mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
                printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
                       sb->s_id);
                goto fail_chunk_root;
        }

        btrfs_close_extra_devices(fs_devices);
        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_root_level(disk_super));
        generation = btrfs_super_generation(disk_super);

        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super),
                                          blocksize, generation);
        if (!tree_root->node)
                goto fail_chunk_root;
        if (!test_bit(EXTENT_BUFFER_UPTODATE, &tree_root->node->bflags)) {
                printk(KERN_WARNING "btrfs: failed to read tree root on %s\n",
                       sb->s_id);
                goto fail_tree_root;
        }
        btrfs_set_root_node(&tree_root->root_item, tree_root->node);
        tree_root->commit_root = btrfs_root_node(tree_root);

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        if (ret)
                goto fail_tree_root;
        extent_root->track_dirty = 1;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_DEV_TREE_OBJECTID, dev_root);
        if (ret)
                goto fail_extent_root;
        dev_root->track_dirty = 1;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_CSUM_TREE_OBJECTID, csum_root);
        if (ret)
                goto fail_dev_root;
        csum_root->track_dirty = 1;

        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
        fs_info->data_alloc_profile = (u64)-1;
        fs_info->metadata_alloc_profile = (u64)-1;
        fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
        ret = btrfs_read_block_groups(extent_root);
        if (ret) {
                printk(KERN_ERR "Failed to read block groups: %d\n", ret);
                goto fail_block_groups;
        }

        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        if (IS_ERR(fs_info->cleaner_kthread))
                goto fail_block_groups;

        fs_info->transaction_kthread = kthread_run(transaction_kthread,
                                                   tree_root,
                                                   "btrfs-transaction");
        if (IS_ERR(fs_info->transaction_kthread))
                goto fail_cleaner;

        if (!btrfs_test_opt(tree_root, SSD) &&
            !btrfs_test_opt(tree_root, NOSSD) &&
            !fs_info->fs_devices->rotating) {
                printk(KERN_INFO "Btrfs detected SSD devices, enabling SSD "
                       "mode\n");
                btrfs_set_opt(fs_info->mount_opt, SSD);
        }

        if (btrfs_super_log_root(disk_super) != 0) {
                u64 bytenr = btrfs_super_log_root(disk_super);

                if (fs_devices->rw_devices == 0) {
                        printk(KERN_WARNING "Btrfs log replay required "
                               "on RO media\n");
                        err = -EIO;
                        goto fail_trans_kthread;
                }
                blocksize =
                     btrfs_level_size(tree_root,
                                      btrfs_super_log_root_level(disk_super));

                log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
                if (!log_tree_root) {
                        err = -ENOMEM;
                        goto fail_trans_kthread;
                }

                __setup_root(nodesize, leafsize, sectorsize, stripesize,
                             log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);

                log_tree_root->node = read_tree_block(tree_root, bytenr,
                                                      blocksize,
                                                      generation + 1);
                ret = btrfs_recover_log_trees(log_tree_root);
                BUG_ON(ret);

                if (sb->s_flags & MS_RDONLY) {
                        ret = btrfs_commit_super(tree_root);
                        BUG_ON(ret);
                }
        }

        ret = btrfs_find_orphan_roots(tree_root);
        BUG_ON(ret);

        if (!(sb->s_flags & MS_RDONLY)) {
                ret = btrfs_cleanup_fs_roots(fs_info);
                BUG_ON(ret);

                ret = btrfs_recover_relocation(tree_root);
                if (ret < 0) {
                        printk(KERN_WARNING
                               "btrfs: failed to recover relocation\n");
                        err = -EINVAL;
                        goto fail_trans_kthread;
                }
        }

        location.objectid = BTRFS_FS_TREE_OBJECTID;
        location.type = BTRFS_ROOT_ITEM_KEY;
        location.offset = (u64)-1;

        fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
        if (!fs_info->fs_root)
                goto fail_trans_kthread;
        if (IS_ERR(fs_info->fs_root)) {
                err = PTR_ERR(fs_info->fs_root);
                goto fail_trans_kthread;
        }

        if (!(sb->s_flags & MS_RDONLY)) {
                down_read(&fs_info->cleanup_work_sem);
                btrfs_orphan_cleanup(fs_info->fs_root);
                btrfs_orphan_cleanup(fs_info->tree_root);
                up_read(&fs_info->cleanup_work_sem);
        }

        return tree_root;
fail_trans_kthread:
        kthread_stop(fs_info->transaction_kthread);
fail_cleaner:
        kthread_stop(fs_info->cleaner_kthread);

        /*
         * make sure we're done with the btree inode before we stop our
         * kthreads
         */
        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);

fail_block_groups:
        btrfs_free_block_groups(fs_info);
        free_extent_buffer(csum_root->node);
        free_extent_buffer(csum_root->commit_root);
fail_dev_root:
        free_extent_buffer(dev_root->node);
        free_extent_buffer(dev_root->commit_root);
fail_extent_root:
        free_extent_buffer(extent_root->node);
        free_extent_buffer(extent_root->commit_root);
fail_tree_root:
        free_extent_buffer(tree_root->node);
        free_extent_buffer(tree_root->commit_root);
fail_chunk_root:
        free_extent_buffer(chunk_root->node);
        free_extent_buffer(chunk_root->commit_root);
fail_sb_buffer:
        btrfs_stop_workers(&fs_info->generic_worker);
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->delalloc_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_meta_workers);
        btrfs_stop_workers(&fs_info->endio_meta_write_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->endio_freespace_worker);
        btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        iput(fs_info->btree_inode);

        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
fail_bdi:
        bdi_destroy(&fs_info->bdi);
fail_srcu:
        cleanup_srcu_struct(&fs_info->subvol_srcu);
fail:
        kfree(extent_root);
        kfree(tree_root);
        kfree(fs_info);
        kfree(chunk_root);
        kfree(dev_root);
        kfree(csum_root);
        return ERR_PTR(err);
}
static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        printk(KERN_WARNING "lost page write due to "
                               "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}
struct buffer_head *btrfs_read_dev_super(struct block_device *bdev)
{
        struct buffer_head *bh;
        struct buffer_head *latest = NULL;
        struct btrfs_super_block *super;
        int i;
        u64 transid = 0;
        u64 bytenr;

        /* we would like to check all the supers, but that would make
         * a btrfs mount succeed after a mkfs from a different FS.
         * So, we need to add a special mount option to scan for
         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
         */
        for (i = 0; i < 1; i++) {
                bytenr = btrfs_sb_offset(i);
                if (bytenr + 4096 >= i_size_read(bdev->bd_inode))
                        break;
                bh = __bread(bdev, bytenr / 4096, 4096);
                if (!bh)
                        continue;

                super = (struct btrfs_super_block *)bh->b_data;
                if (btrfs_super_bytenr(super) != bytenr ||
                    strncmp((char *)(&super->magic), BTRFS_MAGIC,
                            sizeof(super->magic))) {
                        brelse(bh);
                        continue;
                }

                if (!latest || btrfs_super_generation(super) > transid) {
                        brelse(latest);
                        latest = bh;
                        transid = btrfs_super_generation(super);
                } else {
                        brelse(bh);
                }
        }
        return latest;
}
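/*
 * Mirror copies of the super block live at the offsets returned by
 * btrfs_sb_offset(): copy 0 sits at 64K, and later copies (up to
 * BTRFS_SUPER_MIRROR_MAX) are placed at exponentially larger offsets so
 * at least one tends to survive localized damage.  Only copy 0 is
 * scanned here, for the mkfs-over-another-FS reason described above.
 */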
2136 * this should be called twice, once with wait == 0 and
2137 * once with wait == 1. When wait == 0 is done, all the buffer heads
2138 * we write are pinned.
2140 * They are released when wait == 1 is done.
2141 * max_mirrors must be the same for both runs, and it indicates how
2142 * many supers on this one device should be written.
2144 * max_mirrors == 0 means to write them all.
static int write_dev_supers(struct btrfs_device *device,
			    struct btrfs_super_block *sb,
			    int do_barriers, int wait, int max_mirrors)
{
	struct buffer_head *bh;
	int i;
	int ret;
	int errors = 0;
	u32 crc;
	u64 bytenr;
	int last_barrier = 0;

	if (max_mirrors == 0)
		max_mirrors = BTRFS_SUPER_MIRROR_MAX;

	/* make sure only the last submit_bh does a barrier */
	if (do_barriers) {
		for (i = 0; i < max_mirrors; i++) {
			bytenr = btrfs_sb_offset(i);
			if (bytenr + BTRFS_SUPER_INFO_SIZE >=
			    device->total_bytes)
				break;
			last_barrier = i;
		}
	}

	for (i = 0; i < max_mirrors; i++) {
		bytenr = btrfs_sb_offset(i);
		if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes)
			break;

		if (wait) {
			bh = __find_get_block(device->bdev, bytenr / 4096,
					      BTRFS_SUPER_INFO_SIZE);
			BUG_ON(!bh);
			wait_on_buffer(bh);
			if (!buffer_uptodate(bh))
				errors++;

			/* drop our reference */
			brelse(bh);

			/* drop the reference from the wait == 0 run */
			brelse(bh);
			continue;
		}

		btrfs_set_super_bytenr(sb, bytenr);

		crc = ~(u32)0;
		crc = btrfs_csum_data(NULL, (char *)sb +
				      BTRFS_CSUM_SIZE, crc,
				      BTRFS_SUPER_INFO_SIZE -
				      BTRFS_CSUM_SIZE);
		btrfs_csum_final(crc, sb->csum);

		/*
		 * one reference for us, and we leave it for the
		 * caller
		 */
		bh = __getblk(device->bdev, bytenr / 4096,
			      BTRFS_SUPER_INFO_SIZE);
		memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);

		/* one reference for submit_bh */
		get_bh(bh);

		set_buffer_uptodate(bh);
		lock_buffer(bh);
		bh->b_end_io = btrfs_end_buffer_write_sync;

		if (i == last_barrier && do_barriers && device->barriers) {
			ret = submit_bh(WRITE_BARRIER, bh);
			if (ret == -EOPNOTSUPP) {
				printk("btrfs: disabling barriers on dev %s\n",
				       device->name);
				set_buffer_uptodate(bh);
				device->barriers = 0;
				/* one reference for submit_bh */
				get_bh(bh);
				lock_buffer(bh);
				ret = submit_bh(WRITE_SYNC, bh);
			}
		} else {
			ret = submit_bh(WRITE_SYNC, bh);
		}

		if (ret)
			errors++;
	}
	return errors < i ? 0 : -1;
}
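
/*
 * Illustrative sketch (not in the original source): the two-pass calling
 * convention the comment above write_dev_supers() describes. The helper
 * name is hypothetical; write_all_supers() below is the real caller and
 * interleaves the two passes across all devices.
 */
static __maybe_unused int example_write_supers_two_pass(
				struct btrfs_device *device,
				struct btrfs_super_block *sb)
{
	int ret;

	/* pass 1: checksum and submit; the buffer heads stay pinned */
	ret = write_dev_supers(device, sb, 0, 0, 0);
	if (ret)
		return ret;

	/* pass 2: wait for the IO and drop the pinned references */
	return write_dev_supers(device, sb, 0, 1, 0);
}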
int write_all_supers(struct btrfs_root *root, int max_mirrors)
{
	struct list_head *head;
	struct btrfs_device *dev;
	struct btrfs_super_block *sb;
	struct btrfs_dev_item *dev_item;
	int ret;
	int do_barriers;
	int max_errors;
	int total_errors = 0;
	u64 flags;

	max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
	do_barriers = !btrfs_test_opt(root, NOBARRIER);

	sb = &root->fs_info->super_for_commit;
	dev_item = &sb->dev_item;

	mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
	head = &root->fs_info->fs_devices->devices;
	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev) {
			total_errors++;
			continue;
		}
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		btrfs_set_stack_device_generation(dev_item, 0);
		btrfs_set_stack_device_type(dev_item, dev->type);
		btrfs_set_stack_device_id(dev_item, dev->devid);
		btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
		btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
		btrfs_set_stack_device_io_align(dev_item, dev->io_align);
		btrfs_set_stack_device_io_width(dev_item, dev->io_width);
		btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
		memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
		memcpy(dev_item->fsid, dev->fs_devices->fsid, BTRFS_UUID_SIZE);

		flags = btrfs_super_flags(sb);
		btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

		ret = write_dev_supers(dev, sb, do_barriers, 0, max_mirrors);
		if (ret)
			total_errors++;
	}
	if (total_errors > max_errors) {
		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
		       total_errors);
		BUG();
	}

	total_errors = 0;
	list_for_each_entry(dev, head, dev_list) {
		if (!dev->bdev)
			continue;
		if (!dev->in_fs_metadata || !dev->writeable)
			continue;

		ret = write_dev_supers(dev, sb, do_barriers, 1, max_mirrors);
		if (ret)
			total_errors++;
	}
	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
	if (total_errors > max_errors) {
		printk(KERN_ERR "btrfs: %d errors while writing supers\n",
		       total_errors);
		BUG();
	}
	return 0;
}
int write_ctree_super(struct btrfs_trans_handle *trans,
		      struct btrfs_root *root, int max_mirrors)
{
	int ret;

	ret = write_all_supers(root, max_mirrors);
	return ret;
}
int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	spin_lock(&fs_info->fs_roots_radix_lock);
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	spin_unlock(&fs_info->fs_roots_radix_lock);

	if (btrfs_root_refs(&root->root_item) == 0)
		synchronize_srcu(&fs_info->subvol_srcu);

	free_fs_root(root);
	return 0;
}
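
/*
 * Illustrative note (not in the original source): once the root is out of
 * the radix tree, synchronize_srcu() above waits for every reader still
 * inside a subvol_srcu read-side section to finish, so free_fs_root() can
 * release the structure without racing against in-flight lookups.
 */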
static void free_fs_root(struct btrfs_root *root)
{
	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
	if (root->anon_super.s_dev) {
		down_write(&root->anon_super.s_umount);
		kill_anon_super(&root->anon_super);
	}
	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root->name);
	kfree(root);
}
static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	int i;
	struct btrfs_root *gang[8];

	while (!list_empty(&fs_info->dead_roots)) {
		gang[0] = list_entry(fs_info->dead_roots.next,
				     struct btrfs_root, root_list);
		list_del(&gang[0]->root_list);

		if (gang[0]->in_radix) {
			btrfs_free_fs_root(fs_info, gang[0]);
		} else {
			free_extent_buffer(gang[0]->node);
			free_extent_buffer(gang[0]->commit_root);
			kfree(gang[0]);
		}
	}

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
	return 0;
}
int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info)
{
	u64 root_objectid = 0;
	struct btrfs_root *gang[8];
	int i;
	int ret;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, root_objectid,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;

		root_objectid = gang[ret - 1]->root_key.objectid + 1;
		for (i = 0; i < ret; i++) {
			root_objectid = gang[i]->root_key.objectid;
			btrfs_orphan_cleanup(gang[i]);
		}
		root_objectid++;
	}
	return 0;
}
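
/*
 * Illustrative note (not in the original source): the loop above pages
 * through the radix tree eight roots at a time. Advancing root_objectid
 * past the last objectid each batch returned keeps the next
 * radix_tree_gang_lookup() call from revisiting roots already handled.
 */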
int btrfs_commit_super(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	mutex_lock(&root->fs_info->cleaner_mutex);
	btrfs_run_delayed_iputs(root);
	btrfs_clean_old_snapshots(root);
	mutex_unlock(&root->fs_info->cleaner_mutex);

	/* wait until ongoing cleanup work done */
	down_write(&root->fs_info->cleanup_work_sem);
	up_write(&root->fs_info->cleanup_work_sem);

	trans = btrfs_join_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
	BUG_ON(ret);
	/* run commit again to drop the original snapshot */
	trans = btrfs_join_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	BUG_ON(ret);

	ret = write_ctree_super(NULL, root, 0);
	return ret;
}
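
/*
 * Illustrative note (not in the original source): the empty
 * down_write()/up_write() pair on cleanup_work_sem above is a barrier,
 * not a critical section -- down_write() cannot return until every
 * down_read() holder has released the semaphore, so it simply waits out
 * whatever cleanup work was still in flight.
 */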
int close_ctree(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	fs_info->closing = 1;
	smp_mb();

	btrfs_put_block_group_cache(fs_info);
	if (!(fs_info->sb->s_flags & MS_RDONLY)) {
		ret = btrfs_commit_super(root);
		if (ret)
			printk(KERN_ERR "btrfs: commit super ret %d\n", ret);
	}

	kthread_stop(root->fs_info->transaction_kthread);
	kthread_stop(root->fs_info->cleaner_kthread);

	fs_info->closing = 2;
	smp_mb();

	if (fs_info->delalloc_bytes) {
		printk(KERN_INFO "btrfs: at unmount delalloc count %llu\n",
		       (unsigned long long)fs_info->delalloc_bytes);
	}
	if (fs_info->total_ref_cache_size) {
		printk(KERN_INFO "btrfs: at umount reference cache size %llu\n",
		       (unsigned long long)fs_info->total_ref_cache_size);
	}

	free_extent_buffer(fs_info->extent_root->node);
	free_extent_buffer(fs_info->extent_root->commit_root);
	free_extent_buffer(fs_info->tree_root->node);
	free_extent_buffer(fs_info->tree_root->commit_root);
	free_extent_buffer(root->fs_info->chunk_root->node);
	free_extent_buffer(root->fs_info->chunk_root->commit_root);
	free_extent_buffer(root->fs_info->dev_root->node);
	free_extent_buffer(root->fs_info->dev_root->commit_root);
	free_extent_buffer(root->fs_info->csum_root->node);
	free_extent_buffer(root->fs_info->csum_root->commit_root);

	btrfs_free_block_groups(root->fs_info);

	del_fs_roots(fs_info);

	iput(fs_info->btree_inode);

	btrfs_stop_workers(&fs_info->generic_worker);
	btrfs_stop_workers(&fs_info->fixup_workers);
	btrfs_stop_workers(&fs_info->delalloc_workers);
	btrfs_stop_workers(&fs_info->workers);
	btrfs_stop_workers(&fs_info->endio_workers);
	btrfs_stop_workers(&fs_info->endio_meta_workers);
	btrfs_stop_workers(&fs_info->endio_meta_write_workers);
	btrfs_stop_workers(&fs_info->endio_write_workers);
	btrfs_stop_workers(&fs_info->endio_freespace_worker);
	btrfs_stop_workers(&fs_info->submit_workers);

	btrfs_close_devices(fs_info->fs_devices);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);

	bdi_destroy(&fs_info->bdi);
	cleanup_srcu_struct(&fs_info->subvol_srcu);

	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
	kfree(fs_info->csum_root);
	kfree(fs_info);

	return 0;
}
int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
	int ret;
	struct inode *btree_inode = buf->first_page->mapping->host;

	ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf,
				     NULL);
	if (!ret)
		return ret;

	ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
				    parent_transid);
	return !ret;
}
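
/*
 * Illustrative note (not in the original source): a buffer only counts
 * as uptodate here if both checks pass -- the extent io tree says the
 * pages were read and checksummed, and the header generation matches
 * the transid the parent node recorded for this block.
 */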
int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	struct inode *btree_inode = buf->first_page->mapping->host;
	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
					  buf);
}
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	struct inode *btree_inode = root->fs_info->btree_inode;
	int was_dirty;

	btrfs_assert_tree_locked(buf);
	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "btrfs transid mismatch buffer %llu, "
		       "found %llu running %llu\n",
		       (unsigned long long)buf->start,
		       (unsigned long long)transid,
		       (unsigned long long)root->fs_info->generation);
		WARN_ON(1);
	}
	was_dirty = set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					    buf);
	if (!was_dirty) {
		spin_lock(&root->fs_info->delalloc_lock);
		root->fs_info->dirty_metadata_bytes += buf->len;
		spin_unlock(&root->fs_info->delalloc_lock);
	}
}
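
/*
 * Illustrative note (not in the original source): dirty_metadata_bytes
 * is only bumped when the buffer was not already dirty; the matching
 * decrement happens in btree_lock_page_hook() below, once writeback
 * clears EXTENT_BUFFER_DIRTY.
 */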
void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	/*
	 * looks as though older kernels can get into trouble with
	 * this code; they end up stuck in balance_dirty_pages forever
	 */
	u64 num_dirty;
	unsigned long thresh = 32 * 1024 * 1024;

	if (current->flags & PF_MEMALLOC)
		return;

	num_dirty = root->fs_info->dirty_metadata_bytes;

	if (num_dirty > thresh) {
		balance_dirty_pages_ratelimited_nr(
				   root->fs_info->btree_inode->i_mapping, 1);
	}
}
int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	int ret;

	ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
	if (ret == 0)
		set_bit(EXTENT_BUFFER_UPTODATE, &buf->bflags);
	return ret;
}
int btree_lock_page_hook(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_buffer *eb;
	unsigned long len;
	u64 bytenr = page_offset(page);

	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;

	len = page->private >> 2;
	eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS);
	if (!eb)
		goto out;

	btrfs_tree_lock(eb);
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);

	if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
		spin_lock(&root->fs_info->delalloc_lock);
		if (root->fs_info->dirty_metadata_bytes >= eb->len)
			root->fs_info->dirty_metadata_bytes -= eb->len;
		else
			WARN_ON(1);
		spin_unlock(&root->fs_info->delalloc_lock);
	}

	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);
out:
	lock_page(page);
	return 0;
}
static struct extent_io_ops btree_extent_io_ops = {
	.write_cache_pages_lock_hook = btree_lock_page_hook,
	.readpage_end_io_hook = btree_readpage_end_io_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};