Btrfs: Use async helpers to deal with pages that have been improperly dirtied
fs/btrfs/disk-io.c
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/version.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for block_sync_page */
#include <linux/workqueue.h>
#include <linux/kthread.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
# include <linux/freezer.h>
#else
# include <linux/sched.h>
#endif
#include "crc32c.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"
#include "async-thread.h"
#include "locking.h"

#if 0
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
{
        if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
                printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n",
                       (unsigned long long)extent_buffer_blocknr(buf),
                       (unsigned long long)btrfs_header_blocknr(buf));
                return 1;
        }
        return 0;
}
#endif

static struct extent_io_ops btree_extent_io_ops;
static void end_workqueue_fn(struct btrfs_work *work);

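/*
 * end_io_wq wraps a bio so its completion can be punted to a helper
 * thread: the original bi_end_io and bi_private are saved here and
 * restored by end_workqueue_fn() once any checksum work is done.
 */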
struct end_io_wq {
        struct bio *bio;
        bio_end_io_t *end_io;
        void *private;
        struct btrfs_fs_info *info;
        int error;
        int metadata;
        struct list_head list;
        struct btrfs_work work;
};

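/*
 * async_submit_bio carries everything run_one_async_submit() needs to
 * submit a write from a worker thread instead of the caller's context.
 */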
struct async_submit_bio {
        struct inode *inode;
        struct bio *bio;
        struct list_head list;
        extent_submit_bio_hook_t *submit_bio_hook;
        int rw;
        int mirror_num;
        struct btrfs_work work;
};

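/*
 * The btree inode is backed by a single extent_map that spans the whole
 * address space and points at the latest bdev; lookups below either find
 * that mapping or create it the first time through.
 */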
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
                                    size_t page_offset, u64 start, u64 len,
                                    int create)
{
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
        struct extent_map *em;
        int ret;

        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, start, len);
        if (em) {
                em->bdev =
                        BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
                spin_unlock(&em_tree->lock);
                goto out;
        }
        spin_unlock(&em_tree->lock);

        em = alloc_extent_map(GFP_NOFS);
        if (!em) {
                em = ERR_PTR(-ENOMEM);
                goto out;
        }
        em->start = 0;
        em->len = (u64)-1;
        em->block_start = 0;
        em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

        spin_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
        if (ret == -EEXIST) {
                u64 failed_start = em->start;
                u64 failed_len = em->len;

                printk("failed to insert %Lu %Lu -> %Lu into tree\n",
                       em->start, em->len, em->block_start);
                free_extent_map(em);
                em = lookup_extent_mapping(em_tree, start, len);
                if (em) {
                        printk("after failing, found %Lu %Lu %Lu\n",
                               em->start, em->len, em->block_start);
                        ret = 0;
                } else {
                        em = lookup_extent_mapping(em_tree, failed_start,
                                                   failed_len);
                        if (em) {
                                printk("double failure lookup gives us "
                                       "%Lu %Lu -> %Lu\n", em->start,
                                       em->len, em->block_start);
                                free_extent_map(em);
                        }
                        ret = -EIO;
                }
        } else if (ret) {
                free_extent_map(em);
                em = NULL;
        }
        spin_unlock(&em_tree->lock);

        if (ret)
                em = ERR_PTR(ret);
out:
        return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
        return btrfs_crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
        *(__le32 *)result = ~cpu_to_le32(crc);
}

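/*
 * Compute the crc32c of a tree block, skipping the checksum field
 * itself.  With verify == 0 the result is stamped into the block; with
 * verify == 1 it is compared against the stored value instead.
 */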
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                           int verify)
{
        char result[BTRFS_CRC32_SIZE];
        unsigned long len;
        unsigned long cur_len;
        unsigned long offset = BTRFS_CSUM_SIZE;
        char *map_token = NULL;
        char *kaddr;
        unsigned long map_start;
        unsigned long map_len;
        int err;
        u32 crc = ~(u32)0;

        len = buf->len - offset;
        while(len > 0) {
                err = map_private_extent_buffer(buf, offset, 32,
                                                &map_token, &kaddr,
                                                &map_start, &map_len, KM_USER0);
                if (err) {
                        printk("failed to map extent buffer! %lu\n",
                               offset);
                        return 1;
                }
                cur_len = min(len, map_len - (offset - map_start));
                crc = btrfs_csum_data(root, kaddr + offset - map_start,
                                      crc, cur_len);
                len -= cur_len;
                offset += cur_len;
                unmap_extent_buffer(buf, map_token, KM_USER0);
        }
        btrfs_csum_final(crc, result);

        if (verify) {
                int from_this_trans = 0;

                if (root->fs_info->running_transaction &&
                    btrfs_header_generation(buf) ==
                    root->fs_info->running_transaction->transid)
                        from_this_trans = 1;

                /* FIXME, this is not good */
                if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
                        u32 val;
                        u32 found = 0;
                        memcpy(&found, result, BTRFS_CRC32_SIZE);

                        read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
                        printk("btrfs: %s checksum verify failed on %llu "
                               "wanted %X found %X from_this_trans %d "
                               "level %d\n",
                               root->fs_info->sb->s_id,
                               buf->start, val, found, from_this_trans,
                               btrfs_header_level(buf));
                        return 1;
                }
        } else {
                write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
        }
        return 0;
}

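/*
 * Make sure a child block's generation matches the transid recorded in
 * the pointer that led us to it; a mismatch means we read a stale copy,
 * and the buffer's uptodate bits are cleared to force a re-read.
 */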
static int verify_parent_transid(struct extent_io_tree *io_tree,
                                 struct extent_buffer *eb, u64 parent_transid)
{
        int ret;

        if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
                return 0;

        lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
        if (extent_buffer_uptodate(io_tree, eb) &&
            btrfs_header_generation(eb) == parent_transid) {
                ret = 0;
                goto out;
        }
        printk("parent transid verify failed on %llu wanted %llu found %llu\n",
               (unsigned long long)eb->start,
               (unsigned long long)parent_transid,
               (unsigned long long)btrfs_header_generation(eb));
        ret = 1;
out:
        clear_extent_buffer_uptodate(io_tree, eb);
        unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
                      GFP_NOFS);
        return ret;
}

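/*
 * Read a tree block, trying each additional mirror in turn until one
 * copy both reads cleanly and passes the parent transid check.
 */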
static int btree_read_extent_buffer_pages(struct btrfs_root *root,
                                          struct extent_buffer *eb,
                                          u64 start, u64 parent_transid)
{
        struct extent_io_tree *io_tree;
        int ret;
        int num_copies = 0;
        int mirror_num = 0;

        io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
        while (1) {
                ret = read_extent_buffer_pages(io_tree, eb, start, 1,
                                               btree_get_extent, mirror_num);
                if (!ret &&
                    !verify_parent_transid(io_tree, eb, parent_transid))
                        return ret;

                num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
                                              eb->start, eb->len);
                if (num_copies == 1)
                        return ret;

                mirror_num++;
                if (mirror_num > num_copies)
                        return ret;
        }
        return -EIO;
}

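/*
 * Checksum a dirty tree block before it is written out.  Any tail pages
 * of the buffer are read in first so the whole block is up to date, and
 * the sanity checks below catch pages that were improperly dirtied.
 */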
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
        struct extent_io_tree *tree;
        u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        int ret;

        tree = &BTRFS_I(page->mapping->host)->io_tree;

        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;
        len = page->private >> 2;
        if (len == 0) {
                WARN_ON(1);
        }
        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
        ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
                                             btrfs_header_generation(eb));
        BUG_ON(ret);
        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
                       start, found_start, len);
                WARN_ON(1);
                goto err;
        }
        if (eb->first_page != page) {
                printk("bad first page %lu %lu\n", eb->first_page->index,
                       page->index);
                WARN_ON(1);
                goto err;
        }
        if (!PageUptodate(page)) {
                printk("csum not up to date page %lu\n", page->index);
                WARN_ON(1);
                goto err;
        }
        found_level = btrfs_header_level(eb);
        spin_lock(&root->fs_info->hash_lock);
        btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
        spin_unlock(&root->fs_info->hash_lock);
        csum_tree_block(root, eb, 0);
err:
        free_extent_buffer(eb);
out:
        return 0;
}

static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
{
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

        csum_dirty_buffer(root, page);
        return 0;
}

int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
                               struct extent_state *state)
{
        struct extent_io_tree *tree;
        u64 found_start;
        int found_level;
        unsigned long len;
        struct extent_buffer *eb;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        int ret = 0;

        tree = &BTRFS_I(page->mapping->host)->io_tree;
        if (page->private == EXTENT_PAGE_PRIVATE)
                goto out;
        if (!page->private)
                goto out;
        len = page->private >> 2;
        if (len == 0) {
                WARN_ON(1);
        }
        eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);

        found_start = btrfs_header_bytenr(eb);
        if (found_start != start) {
                ret = -EIO;
                goto err;
        }
        if (eb->first_page != page) {
                printk("bad first page %lu %lu\n", eb->first_page->index,
                       page->index);
                WARN_ON(1);
                ret = -EIO;
                goto err;
        }
        if (memcmp_extent_buffer(eb, root->fs_info->fsid,
                                 (unsigned long)btrfs_header_fsid(eb),
                                 BTRFS_FSID_SIZE)) {
                printk("bad fsid on block %Lu\n", eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);

        ret = csum_tree_block(root, eb, 1);
        if (ret)
                ret = -EIO;

        end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
        end = eb->start + end - 1;
        release_extent_buffer_tail_pages(eb);
err:
        free_extent_buffer(eb);
out:
        return ret;
}

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_workqueue_bio(struct bio *bio, int err)
#else
static int end_workqueue_bio(struct bio *bio,
                             unsigned int bytes_done, int err)
#endif
{
        struct end_io_wq *end_io_wq = bio->bi_private;
        struct btrfs_fs_info *fs_info;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        if (bio->bi_size)
                return 1;
#endif

        fs_info = end_io_wq->info;
        end_io_wq->error = err;
        end_io_wq->work.func = end_workqueue_fn;
        end_io_wq->work.flags = 0;
        if (bio->bi_rw & (1 << BIO_RW))
                btrfs_queue_worker(&fs_info->endio_write_workers,
                                   &end_io_wq->work);
        else
                btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        return 0;
#endif
}

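/*
 * Redirect a bio's completion through the end_io workqueues: the
 * original bi_end_io/bi_private pair is stashed in an end_io_wq and
 * restored by end_workqueue_fn() after the helper threads have run.
 */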
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
                        int metadata)
{
        struct end_io_wq *end_io_wq;
        end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
        if (!end_io_wq)
                return -ENOMEM;

        end_io_wq->private = bio->bi_private;
        end_io_wq->end_io = bio->bi_end_io;
        end_io_wq->info = info;
        end_io_wq->error = 0;
        end_io_wq->bio = bio;
        end_io_wq->metadata = metadata;

        bio->bi_private = end_io_wq;
        bio->bi_end_io = end_workqueue_bio;
        return 0;
}

static void run_one_async_submit(struct btrfs_work *work)
{
        struct btrfs_fs_info *fs_info;
        struct async_submit_bio *async;

        async = container_of(work, struct async_submit_bio, work);
        fs_info = BTRFS_I(async->inode)->root->fs_info;
        atomic_dec(&fs_info->nr_async_submits);
        async->submit_bio_hook(async->inode, async->rw, async->bio,
                               async->mirror_num);
        kfree(async);
}

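/*
 * Hand a bio to the worker threads for submission.  nr_async_submits is
 * bumped here and dropped in run_one_async_submit(), which lets
 * btrfs_congested_fn() throttle writers when the backlog grows.
 */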
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
                        int rw, struct bio *bio, int mirror_num,
                        extent_submit_bio_hook_t *submit_bio_hook)
{
        struct async_submit_bio *async;

        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
                return -ENOMEM;

        async->inode = inode;
        async->rw = rw;
        async->bio = bio;
        async->mirror_num = mirror_num;
        async->submit_bio_hook = submit_bio_hook;
        async->work.func = run_one_async_submit;
        async->work.flags = 0;
        atomic_inc(&fs_info->nr_async_submits);
        btrfs_queue_worker(&fs_info->workers, &async->work);
        return 0;
}

static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                   int mirror_num)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 offset;
        int ret;

        offset = bio->bi_sector << 9;

        /*
         * when we're called for a write, we're already in the async
         * submission context.  Just jump into btrfs_map_bio
         */
        if (rw & (1 << BIO_RW)) {
                return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
                                     mirror_num, 0);
        }

        /*
         * called for a read, do the setup so that checksum validation
         * can happen in the async kernel threads
         */
        ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
        BUG_ON(ret);

        return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
                                 int mirror_num)
{
        /*
         * kthread helpers are used to submit writes so that checksumming
         * can happen in parallel across all CPUs
         */
        if (!(rw & (1 << BIO_RW))) {
                return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
        }
        return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
                                   inode, rw, bio, mirror_num,
                                   __btree_submit_bio_hook);
}

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_write_full_page(tree, page, btree_get_extent, wbc);
}

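/*
 * For non-sync writeback, only bother the disks once enough metadata is
 * dirty: 96MB of dirty extents when pdflush is writing, 8MB for anyone
 * else.  Smaller amounts stay cached in ram.
 */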
static int btree_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(mapping->host)->io_tree;
        if (wbc->sync_mode == WB_SYNC_NONE) {
                u64 num_dirty;
                u64 start = 0;
                unsigned long thresh = 96 * 1024 * 1024;

                if (wbc->for_kupdate)
                        return 0;

                if (current_is_pdflush()) {
                        thresh = 96 * 1024 * 1024;
                } else {
                        thresh = 8 * 1024 * 1024;
                }
                num_dirty = count_range_bits(tree, &start, (u64)-1,
                                             thresh, EXTENT_DIRTY);
                if (num_dirty < thresh) {
                        return 0;
                }
        }
        return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

int btree_readpage(struct file *file, struct page *page)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
        struct extent_io_tree *tree;
        struct extent_map_tree *map;
        int ret;

        if (page_count(page) > 3) {
                /* once for page->private, once for the caller,
                 * once for the page cache
                 */
                return 0;
        }
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        map = &BTRFS_I(page->mapping->host)->extent_tree;
        ret = try_release_extent_state(map, tree, page, gfp_flags);
        if (ret == 1) {
                invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
        return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
        struct extent_io_tree *tree;
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        extent_invalidatepage(tree, page, offset);
        btree_releasepage(page, GFP_NOFS);
        if (PagePrivate(page)) {
                invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
                ClearPagePrivate(page);
                set_page_private(page, 0);
                page_cache_release(page);
        }
}

#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh;
        struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
        struct buffer_head *head;
        if (!page_has_buffers(page)) {
                create_empty_buffers(page, root->fs_info->sb->s_blocksize,
                                     (1 << BH_Dirty)|(1 << BH_Uptodate));
        }
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        csum_tree_block(root, bh, 0);
                bh = bh->b_this_page;
        } while (bh != head);
        return block_write_full_page(page, btree_get_block, wbc);
}
#endif

static struct address_space_operations btree_aops = {
        .readpage = btree_readpage,
        .writepage = btree_writepage,
        .writepages = btree_writepages,
        .releasepage = btree_releasepage,
        .invalidatepage = btree_invalidatepage,
        .sync_page = block_sync_page,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
                         u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        int ret = 0;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return 0;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, 0, btree_get_extent, 0);
        free_extent_buffer(buf);
        return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;
        eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                bytenr, blocksize, GFP_NOFS);
        return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                   u64 bytenr, u32 blocksize)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_buffer *eb;

        eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
                                 bytenr, blocksize, NULL, GFP_NOFS);
        return eb;
}

struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u32 blocksize, u64 parent_transid)
{
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
        struct extent_io_tree *io_tree;
        int ret;

        io_tree = &BTRFS_I(btree_inode)->io_tree;

        buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
        if (!buf)
                return NULL;

        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);

        if (ret == 0) {
                buf->flags |= EXTENT_UPTODATE;
        }
        return buf;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct extent_buffer *buf)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        if (btrfs_header_generation(buf) ==
            root->fs_info->running_transaction->transid) {
                WARN_ON(!btrfs_tree_locked(buf));
                clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
        }
        return 0;
}

int wait_on_tree_block_writeback(struct btrfs_root *root,
                                 struct extent_buffer *buf)
{
        struct inode *btree_inode = root->fs_info->btree_inode;
        wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
                                        buf);
        return 0;
}

static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
                        u32 stripesize, struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->inode = NULL;
        root->commit_root = NULL;
        root->sectorsize = sectorsize;
        root->nodesize = nodesize;
        root->leafsize = leafsize;
        root->stripesize = stripesize;
        root->ref_cows = 0;
        root->track_dirty = 0;

        root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_inode = 0;
        root->last_inode_alloc = 0;
        root->name = NULL;
        root->in_sysfs = 0;

        INIT_LIST_HEAD(&root->dirty_list);
        spin_lock_init(&root->node_lock);
        mutex_init(&root->objectid_mutex);
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
        memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        root->defrag_trans_start = fs_info->generation;
        init_completion(&root->kobj_unregister);
        root->defrag_running = 0;
        root->defrag_level = 0;
        root->root_key.objectid = objectid;
        return 0;
}

static int find_and_setup_root(struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;
        u32 blocksize;

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        BUG_ON(ret);

        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, 0);
        BUG_ON(!root->node);
        return 0;
}

struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
                                               struct btrfs_key *location)
{
        struct btrfs_root *root;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_path *path;
        struct extent_buffer *l;
        u64 highest_inode;
        u32 blocksize;
        int ret = 0;

        root = kzalloc(sizeof(*root), GFP_NOFS);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
                ret = find_and_setup_root(tree_root, fs_info,
                                          location->objectid, root);
                if (ret) {
                        kfree(root);
                        return ERR_PTR(ret);
                }
                goto insert;
        }

        __setup_root(tree_root->nodesize, tree_root->leafsize,
                     tree_root->sectorsize, tree_root->stripesize,
                     root, fs_info, location->objectid);

        path = btrfs_alloc_path();
        BUG_ON(!path);
        ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
        if (ret != 0) {
                if (ret > 0)
                        ret = -ENOENT;
                goto out;
        }
        l = path->nodes[0];
        read_extent_buffer(l, &root->root_item,
                           btrfs_item_ptr_offset(l, path->slots[0]),
                           sizeof(root->root_item));
        memcpy(&root->root_key, location, sizeof(*location));
        ret = 0;
out:
        btrfs_release_path(root, path);
        btrfs_free_path(path);
        if (ret) {
                kfree(root);
                return ERR_PTR(ret);
        }
        blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
        root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
                                     blocksize, 0);
        BUG_ON(!root->node);
insert:
        root->ref_cows = 1;
        ret = btrfs_find_highest_inode(root, &highest_inode);
        if (ret == 0) {
                root->highest_inode = highest_inode;
                root->last_inode_alloc = highest_inode;
        }
        return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
                                        u64 root_objectid)
{
        struct btrfs_root *root;

        if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)root_objectid);
        return root;
}

struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
                                              struct btrfs_key *location)
{
        struct btrfs_root *root;
        int ret;

        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
                return fs_info->tree_root;
        if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
                return fs_info->extent_root;
        if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
                return fs_info->chunk_root;
        if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
                return fs_info->dev_root;

        root = radix_tree_lookup(&fs_info->fs_roots_radix,
                                 (unsigned long)location->objectid);
        if (root)
                return root;

        root = btrfs_read_fs_root_no_radix(fs_info, location);
        if (IS_ERR(root))
                return root;
        ret = radix_tree_insert(&fs_info->fs_roots_radix,
                                (unsigned long)root->root_key.objectid,
                                root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }
        ret = btrfs_find_dead_roots(fs_info->tree_root,
                                    root->root_key.objectid, root);
        BUG_ON(ret);

        return root;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
                                      struct btrfs_key *location,
                                      const char *name, int namelen)
{
        struct btrfs_root *root;
        int ret;

        root = btrfs_read_fs_root_no_name(fs_info, location);
        if (!root)
                return NULL;

        if (root->in_sysfs)
                return root;

        ret = btrfs_set_root_name(root, name, namelen);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root);
                return ERR_PTR(ret);
        }

        ret = btrfs_sysfs_add_root(root);
        if (ret) {
                free_extent_buffer(root->node);
                kfree(root->name);
                kfree(root);
                return ERR_PTR(ret);
        }
        root->in_sysfs = 1;
        return root;
}

#if 0
static int add_hasher(struct btrfs_fs_info *info, char *type)
{
        struct btrfs_hasher *hasher;

        hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
        if (!hasher)
                return -ENOMEM;
        hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
        if (!hasher->hash_tfm) {
                kfree(hasher);
                return -EINVAL;
        }
        spin_lock(&info->hash_lock);
        list_add(&hasher->list, &info->hashers);
        spin_unlock(&info->hash_lock);
        return 0;
}
#endif

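/*
 * Congestion callback for the btree inode's bdi: report write congestion
 * once the async submit backlog passes 256 bios per open device, or when
 * any underlying device's queue is itself congested.
 */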
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
        struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
        int ret = 0;
        int limit = 256 * info->fs_devices->open_devices;
        struct list_head *cur;
        struct btrfs_device *device;
        struct backing_dev_info *bdi;

        if ((bdi_bits & (1 << BDI_write_congested)) &&
            atomic_read(&info->nr_async_submits) > limit) {
                return 1;
        }

        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (!device->bdev)
                        continue;
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi && bdi_congested(bdi, bdi_bits)) {
                        ret = 1;
                        break;
                }
        }
        return ret;
}

/*
 * this unplugs every device on the box, and it is only used when page
 * is null
 */
static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct list_head *cur;
        struct btrfs_device *device;
        struct btrfs_fs_info *info;

        info = (struct btrfs_fs_info *)bdi->unplug_io_data;
        list_for_each(cur, &info->fs_devices->devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                bdi = blk_get_backing_dev_info(device->bdev);
                if (bdi->unplug_io_fn) {
                        bdi->unplug_io_fn(bdi, page);
                }
        }
}

void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
        struct inode *inode;
        struct extent_map_tree *em_tree;
        struct extent_map *em;
        struct address_space *mapping;
        u64 offset;

        /* the generic O_DIRECT read code does this */
        if (!page) {
                __unplug_io_fn(bdi, page);
                return;
        }

        /*
         * page->mapping may change at any time.  Get a consistent copy
         * and use that for everything below
         */
        smp_mb();
        mapping = page->mapping;
        if (!mapping)
                return;

        inode = mapping->host;
        offset = page_offset(page);

        em_tree = &BTRFS_I(inode)->extent_tree;
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
        spin_unlock(&em_tree->lock);
        if (!em)
                return;

        offset = offset - em->start;
        btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
                          em->block_start + offset, page);
        free_extent_map(em);
}

static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_init(bdi);
#endif
        bdi->ra_pages = default_backing_dev_info.ra_pages;
        bdi->state = 0;
        bdi->capabilities = default_backing_dev_info.capabilities;
        bdi->unplug_io_fn = btrfs_unplug_io_fn;
        bdi->unplug_io_data = info;
        bdi->congested_fn = btrfs_congested_fn;
        bdi->congested_data = info;
        return 0;
}

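/*
 * A tree block may span several pages but must be checksummed as one
 * unit.  Decide whether every page of the block this bio touches is
 * either inside the bio itself or already up to date in the page cache.
 */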
static int bio_ready_for_csum(struct bio *bio)
{
        u64 length = 0;
        u64 buf_len = 0;
        u64 start = 0;
        struct page *page;
        struct extent_io_tree *io_tree = NULL;
        struct btrfs_fs_info *info = NULL;
        struct bio_vec *bvec;
        int i;
        int ret;

        bio_for_each_segment(bvec, bio, i) {
                page = bvec->bv_page;
                if (page->private == EXTENT_PAGE_PRIVATE) {
                        length += bvec->bv_len;
                        continue;
                }
                if (!page->private) {
                        length += bvec->bv_len;
                        continue;
                }
                length = bvec->bv_len;
                buf_len = page->private >> 2;
                start = page_offset(page) + bvec->bv_offset;
                io_tree = &BTRFS_I(page->mapping->host)->io_tree;
                info = BTRFS_I(page->mapping->host)->root->fs_info;
        }
        /* are we fully contained in this bio? */
        if (buf_len <= length)
                return 1;

        ret = extent_range_uptodate(io_tree, start + length,
                                    start + buf_len - 1);
        return ret;
}

/*
 * called by the kthread helper functions to finally call the bio end_io
 * functions.  This is where read checksum verification actually happens
 */
static void end_workqueue_fn(struct btrfs_work *work)
{
        struct bio *bio;
        struct end_io_wq *end_io_wq;
        struct btrfs_fs_info *fs_info;
        int error;

        end_io_wq = container_of(work, struct end_io_wq, work);
        bio = end_io_wq->bio;
        fs_info = end_io_wq->info;

        /* metadata bios are special because the whole tree block must
         * be checksummed at once.  This makes sure the entire block is in
         * ram and up to date before trying to verify things.  For
         * blocksize <= pagesize, it is basically a noop
         */
        if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
                btrfs_queue_worker(&fs_info->endio_workers,
                                   &end_io_wq->work);
                return;
        }
        error = end_io_wq->error;
        bio->bi_private = end_io_wq->private;
        bio->bi_end_io = end_io_wq->end_io;
        kfree(end_io_wq);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
        bio_endio(bio, bio->bi_size, error);
#else
        bio_endio(bio, error);
#endif
}

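/*
 * The cleaner kthread sleeps until it is woken, then takes the cleaner
 * mutex and drops old snapshots that were deleted but not yet fully
 * reclaimed.
 */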
static int cleaner_kthread(void *arg)
{
        struct btrfs_root *root = arg;

        do {
                smp_mb();
                if (root->fs_info->closing)
                        break;

                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->cleaner_mutex);
                btrfs_clean_old_snapshots(root);
                mutex_unlock(&root->fs_info->cleaner_mutex);

                if (freezing(current)) {
                        refrigerator();
                } else {
                        smp_mb();
                        if (root->fs_info->closing)
                                break;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule();
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}

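/*
 * The transaction kthread commits the running transaction once it is at
 * least 30 seconds old, rechecking every 5 seconds until then, and wakes
 * the cleaner after every pass.
 */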
static int transaction_kthread(void *arg)
{
        struct btrfs_root *root = arg;
        struct btrfs_trans_handle *trans;
        struct btrfs_transaction *cur;
        unsigned long now;
        unsigned long delay;
        int ret;

        do {
                smp_mb();
                if (root->fs_info->closing)
                        break;

                delay = HZ * 30;
                vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
                mutex_lock(&root->fs_info->transaction_kthread_mutex);

                mutex_lock(&root->fs_info->trans_mutex);
                cur = root->fs_info->running_transaction;
                if (!cur) {
                        mutex_unlock(&root->fs_info->trans_mutex);
                        goto sleep;
                }
                now = get_seconds();
                if (now < cur->start_time || now - cur->start_time < 30) {
                        mutex_unlock(&root->fs_info->trans_mutex);
                        delay = HZ * 5;
                        goto sleep;
                }
                mutex_unlock(&root->fs_info->trans_mutex);
                trans = btrfs_start_transaction(root, 1);
                ret = btrfs_commit_transaction(trans, root);
sleep:
                wake_up_process(root->fs_info->cleaner_kthread);
                mutex_unlock(&root->fs_info->transaction_kthread_mutex);

                if (freezing(current)) {
                        refrigerator();
                } else {
                        if (root->fs_info->closing)
                                break;
                        set_current_state(TASK_INTERRUPTIBLE);
                        schedule_timeout(delay);
                        __set_current_state(TASK_RUNNING);
                }
        } while (!kthread_should_stop());
        return 0;
}

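/*
 * Mount-time setup: read the super block, start the worker threads and
 * the cleaner/transaction kthreads, and read the chunk, device, root and
 * extent trees.  Returns the tree root, or an ERR_PTR on failure.
 */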
struct btrfs_root *open_ctree(struct super_block *sb,
                              struct btrfs_fs_devices *fs_devices,
                              char *options)
{
        u32 sectorsize;
        u32 nodesize;
        u32 leafsize;
        u32 blocksize;
        u32 stripesize;
        struct buffer_head *bh;
        struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
                                                GFP_NOFS);
        struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
                                                GFP_NOFS);
        struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
                                              GFP_NOFS);
        int ret;
        int err = -EINVAL;

        struct btrfs_super_block *disk_super;

        if (!extent_root || !tree_root || !fs_info) {
                err = -ENOMEM;
                goto fail;
        }
        INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
        INIT_LIST_HEAD(&fs_info->trans_list);
        INIT_LIST_HEAD(&fs_info->dead_roots);
        INIT_LIST_HEAD(&fs_info->hashers);
        spin_lock_init(&fs_info->hash_lock);
        spin_lock_init(&fs_info->delalloc_lock);
        spin_lock_init(&fs_info->new_trans_lock);

        init_completion(&fs_info->kobj_unregister);
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->chunk_root = chunk_root;
        fs_info->dev_root = dev_root;
        fs_info->fs_devices = fs_devices;
        INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
        INIT_LIST_HEAD(&fs_info->space_info);
        btrfs_mapping_init(&fs_info->mapping_tree);
        atomic_set(&fs_info->nr_async_submits, 0);
        atomic_set(&fs_info->throttles, 0);
        fs_info->sb = sb;
        fs_info->max_extent = (u64)-1;
        fs_info->max_inline = 8192 * 1024;
        setup_bdi(fs_info, &fs_info->bdi);
        fs_info->btree_inode = new_inode(sb);
        fs_info->btree_inode->i_ino = 1;
        fs_info->btree_inode->i_nlink = 1;
        fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);

        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);

        /*
         * we set the i_size on the btree inode to the max possible int.
         * the real end of the address space is determined by all of
         * the devices in the system
         */
        fs_info->btree_inode->i_size = OFFSET_MAX;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

        extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
                            fs_info->btree_inode->i_mapping,
                            GFP_NOFS);
        extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
                             GFP_NOFS);

        BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

        extent_io_tree_init(&fs_info->free_space_cache,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->block_group_cache,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->pinned_extents,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->pending_del,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        extent_io_tree_init(&fs_info->extent_ins,
                            fs_info->btree_inode->i_mapping, GFP_NOFS);
        fs_info->do_barriers = 1;

        BTRFS_I(fs_info->btree_inode)->root = tree_root;
        memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
               sizeof(struct btrfs_key));
        insert_inode_hash(fs_info->btree_inode);
        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

        mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->drop_mutex);
        mutex_init(&fs_info->alloc_mutex);
        mutex_init(&fs_info->chunk_mutex);
        mutex_init(&fs_info->transaction_kthread_mutex);
        mutex_init(&fs_info->cleaner_mutex);
        mutex_init(&fs_info->volume_mutex);
        init_waitqueue_head(&fs_info->transaction_throttle);

#if 0
        ret = add_hasher(fs_info, "crc32c");
        if (ret) {
                printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
                err = -ENOMEM;
                goto fail_iput;
        }
#endif
        __setup_root(4096, 4096, 4096, 4096, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);

        bh = __bread(fs_devices->latest_bdev,
                     BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
        if (!bh)
                goto fail_iput;

        memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
        brelse(bh);

        memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);

        disk_super = &fs_info->super_copy;
        if (!btrfs_super_root(disk_super))
                goto fail_sb_buffer;

        err = btrfs_parse_options(tree_root, options);
        if (err)
                goto fail_sb_buffer;

        /*
         * we need to start all the end_io workers up front because the
         * queue work function gets called at interrupt time, and so it
         * cannot dynamically grow.
         */
        btrfs_init_workers(&fs_info->workers, fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->submit_workers, fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->fixup_workers, 1);
        btrfs_init_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
        btrfs_init_workers(&fs_info->endio_write_workers,
                           fs_info->thread_pool_size);
        btrfs_start_workers(&fs_info->workers, 1);
        btrfs_start_workers(&fs_info->submit_workers, 1);
        btrfs_start_workers(&fs_info->fixup_workers, 1);
        btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
        btrfs_start_workers(&fs_info->endio_write_workers,
                            fs_info->thread_pool_size);

        err = -EINVAL;
        if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
                printk("Btrfs: wanted %llu devices, but found %llu\n",
                       (unsigned long long)btrfs_super_num_devices(disk_super),
                       (unsigned long long)fs_devices->open_devices);
                if (btrfs_test_opt(tree_root, DEGRADED))
                        printk("continuing in degraded mode\n");
                else {
                        goto fail_sb_buffer;
                }
        }

        fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);

        nodesize = btrfs_super_nodesize(disk_super);
        leafsize = btrfs_super_leafsize(disk_super);
        sectorsize = btrfs_super_sectorsize(disk_super);
        stripesize = btrfs_super_stripesize(disk_super);
        tree_root->nodesize = nodesize;
        tree_root->leafsize = leafsize;
        tree_root->sectorsize = sectorsize;
        tree_root->stripesize = stripesize;

        sb->s_blocksize = sectorsize;
        sb->s_blocksize_bits = blksize_bits(sectorsize);

        if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic))) {
                printk("btrfs: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }

        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_sys_array(tree_root);
        mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
                printk("btrfs: failed to read the system array on %s\n",
                       sb->s_id);
                goto fail_sys_array;
        }

        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_chunk_root_level(disk_super));

        __setup_root(nodesize, leafsize, sectorsize, stripesize,
                     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

        chunk_root->node = read_tree_block(chunk_root,
                                           btrfs_super_chunk_root(disk_super),
                                           blocksize, 0);
        BUG_ON(!chunk_root->node);

        read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
                           (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
                           BTRFS_UUID_SIZE);

        mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_chunk_tree(chunk_root);
        mutex_unlock(&fs_info->chunk_mutex);
        BUG_ON(ret);

        btrfs_close_extra_devices(fs_devices);

        blocksize = btrfs_level_size(tree_root,
                                     btrfs_super_root_level(disk_super));

        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super),
                                          blocksize, 0);
        if (!tree_root->node)
                goto fail_sb_buffer;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        if (ret)
                goto fail_tree_root;
        extent_root->track_dirty = 1;

        ret = find_and_setup_root(tree_root, fs_info,
                                  BTRFS_DEV_TREE_OBJECTID, dev_root);
        dev_root->track_dirty = 1;

        if (ret)
                goto fail_extent_root;

        btrfs_read_block_groups(extent_root);

        fs_info->generation = btrfs_super_generation(disk_super) + 1;
        fs_info->data_alloc_profile = (u64)-1;
        fs_info->metadata_alloc_profile = (u64)-1;
        fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        /* kthread_run() returns an ERR_PTR, never NULL, on failure */
        if (IS_ERR(fs_info->cleaner_kthread))
                goto fail_extent_root;

        fs_info->transaction_kthread = kthread_run(transaction_kthread,
                                                   tree_root,
                                                   "btrfs-transaction");
        if (IS_ERR(fs_info->transaction_kthread))
                goto fail_cleaner;

        return tree_root;

fail_cleaner:
        kthread_stop(fs_info->cleaner_kthread);
fail_extent_root:
        free_extent_buffer(extent_root->node);
fail_tree_root:
        free_extent_buffer(tree_root->node);
fail_sys_array:
fail_sb_buffer:
        extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);
fail_iput:
        iput(fs_info->btree_inode);
fail:
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

        kfree(extent_root);
        kfree(tree_root);
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
#endif
        kfree(fs_info);
        return ERR_PTR(err);
}

static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
{
        char b[BDEVNAME_SIZE];

        if (uptodate) {
                set_buffer_uptodate(bh);
        } else {
                if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
                        printk(KERN_WARNING "lost page write due to "
                               "I/O error on %s\n",
                               bdevname(bh->b_bdev, b));
                }
                /* note, we don't set_buffer_write_io_error because we have
                 * our own ways of dealing with the IO errors
                 */
                clear_buffer_uptodate(bh);
        }
        unlock_buffer(bh);
        put_bh(bh);
}

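/*
 * Write the super block to every device, falling back from barrier
 * writes to plain writes when a device rejects barriers.  More than
 * num_devices - 1 failed writes triggers a BUG().
 */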
int write_all_supers(struct btrfs_root *root)
{
        struct list_head *cur;
        struct list_head *head = &root->fs_info->fs_devices->devices;
        struct btrfs_device *dev;
        struct btrfs_super_block *sb;
        struct btrfs_dev_item *dev_item;
        struct buffer_head *bh;
        int ret;
        int do_barriers;
        int max_errors;
        int total_errors = 0;
        u32 crc;
        u64 flags;

        max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
        do_barriers = !btrfs_test_opt(root, NOBARRIER);

        sb = &root->fs_info->super_for_commit;
        dev_item = &sb->dev_item;
        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (!dev->bdev) {
                        total_errors++;
                        continue;
                }
                if (!dev->in_fs_metadata)
                        continue;

                btrfs_set_stack_device_type(dev_item, dev->type);
                btrfs_set_stack_device_id(dev_item, dev->devid);
                btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
                btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
                btrfs_set_stack_device_io_align(dev_item, dev->io_align);
                btrfs_set_stack_device_io_width(dev_item, dev->io_width);
                btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
                memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
                flags = btrfs_super_flags(sb);
                btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);

                crc = ~(u32)0;
                crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
                                      BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
                btrfs_csum_final(crc, sb->csum);

                bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
                              BTRFS_SUPER_INFO_SIZE);

                memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
                dev->pending_io = bh;

                get_bh(bh);
                set_buffer_uptodate(bh);
                lock_buffer(bh);
                bh->b_end_io = btrfs_end_buffer_write_sync;

                if (do_barriers && dev->barriers) {
                        ret = submit_bh(WRITE_BARRIER, bh);
                        if (ret == -EOPNOTSUPP) {
                                printk("btrfs: disabling barriers on dev %s\n",
                                       dev->name);
                                set_buffer_uptodate(bh);
                                dev->barriers = 0;
                                get_bh(bh);
                                lock_buffer(bh);
                                ret = submit_bh(WRITE, bh);
                        }
                } else {
                        ret = submit_bh(WRITE, bh);
                }
                if (ret)
                        total_errors++;
        }
        if (total_errors > max_errors) {
                printk("btrfs: %d errors while writing supers\n", total_errors);
                BUG();
        }
        total_errors = 0;

        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (!dev->bdev)
                        continue;
                if (!dev->in_fs_metadata)
                        continue;

                BUG_ON(!dev->pending_io);
                bh = dev->pending_io;
                wait_on_buffer(bh);
                if (!buffer_uptodate(dev->pending_io)) {
                        if (do_barriers && dev->barriers) {
                                printk("btrfs: disabling barriers on dev %s\n",
                                       dev->name);
                                set_buffer_uptodate(bh);
                                get_bh(bh);
                                lock_buffer(bh);
                                dev->barriers = 0;
                                ret = submit_bh(WRITE, bh);
                                BUG_ON(ret);
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh))
                                        total_errors++;
                        } else {
                                total_errors++;
                        }
                }
                dev->pending_io = NULL;
                brelse(bh);
        }
        if (total_errors > max_errors) {
                printk("btrfs: %d errors while writing supers\n", total_errors);
                BUG();
        }
        return 0;
}

int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
                      *root)
{
        int ret;

        ret = write_all_supers(root);
        return ret;
}

int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
        radix_tree_delete(&fs_info->fs_roots_radix,
                          (unsigned long)root->root_key.objectid);
        if (root->in_sysfs)
                btrfs_sysfs_del_root(root);
        if (root->inode)
                iput(root->inode);
        if (root->node)
                free_extent_buffer(root->node);
        if (root->commit_root)
                free_extent_buffer(root->commit_root);
        if (root->name)
                kfree(root->name);
        kfree(root);
        return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
        int ret;
        struct btrfs_root *gang[8];
        int i;

        while(1) {
                ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
                                             (void **)gang, 0,
                                             ARRAY_SIZE(gang));
                if (!ret)
                        break;
                for (i = 0; i < ret; i++)
                        btrfs_free_fs_root(fs_info, gang[i]);
        }
        return 0;
}

int close_ctree(struct btrfs_root *root)
{
        int ret;
        struct btrfs_trans_handle *trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        fs_info->closing = 1;
        smp_mb();

        kthread_stop(root->fs_info->transaction_kthread);
        kthread_stop(root->fs_info->cleaner_kthread);

        btrfs_clean_old_snapshots(root);
        trans = btrfs_start_transaction(root, 1);
        ret = btrfs_commit_transaction(trans, root);
        /* run commit again to drop the original snapshot */
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);

        write_ctree_super(NULL, root);

        if (fs_info->delalloc_bytes) {
                printk("btrfs: at unmount delalloc count %Lu\n",
                       fs_info->delalloc_bytes);
        }
        if (fs_info->extent_root->node)
                free_extent_buffer(fs_info->extent_root->node);

        if (fs_info->tree_root->node)
                free_extent_buffer(fs_info->tree_root->node);

        if (root->fs_info->chunk_root->node)
                free_extent_buffer(root->fs_info->chunk_root->node);

        if (root->fs_info->dev_root->node)
                free_extent_buffer(root->fs_info->dev_root->node);

        btrfs_free_block_groups(root->fs_info);
        del_fs_roots(fs_info);

        filemap_write_and_wait(fs_info->btree_inode->i_mapping);

        extent_io_tree_empty_lru(&fs_info->free_space_cache);
        extent_io_tree_empty_lru(&fs_info->block_group_cache);
        extent_io_tree_empty_lru(&fs_info->pinned_extents);
        extent_io_tree_empty_lru(&fs_info->pending_del);
        extent_io_tree_empty_lru(&fs_info->extent_ins);
        extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);

        truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);

        btrfs_stop_workers(&fs_info->fixup_workers);
        btrfs_stop_workers(&fs_info->workers);
        btrfs_stop_workers(&fs_info->endio_workers);
        btrfs_stop_workers(&fs_info->endio_write_workers);
        btrfs_stop_workers(&fs_info->submit_workers);

        iput(fs_info->btree_inode);
#if 0
        while(!list_empty(&fs_info->hashers)) {
                struct btrfs_hasher *hasher;
                hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
                                    hashers);
                list_del(&hasher->hashers);
                crypto_free_hash(&fs_info->hash_tfm);
                kfree(hasher);
        }
#endif
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
        bdi_destroy(&fs_info->bdi);
#endif

        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
        kfree(fs_info->chunk_root);
        kfree(fs_info->dev_root);
        return 0;
}

int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
{
        int ret;
        struct inode *btree_inode = buf->first_page->mapping->host;

        ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
        if (!ret)
                return ret;

        ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
                                    parent_transid);
        return !ret;
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
        struct inode *btree_inode = buf->first_page->mapping->host;
        return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
                                          buf);
}

void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        u64 transid = btrfs_header_generation(buf);
        struct inode *btree_inode = root->fs_info->btree_inode;

        WARN_ON(!btrfs_tree_locked(buf));
        if (transid != root->fs_info->generation) {
                printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
                       (unsigned long long)buf->start,
                       transid, root->fs_info->generation);
                WARN_ON(1);
        }
        set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
}

void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
        /*
         * looks as though older kernels can get into trouble with
         * this code, they end up stuck in balance_dirty_pages forever
         */
        struct extent_io_tree *tree;
        u64 num_dirty;
        u64 start = 0;
        unsigned long thresh = 16 * 1024 * 1024;
        tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;

        if (current_is_pdflush())
                return;

        num_dirty = count_range_bits(tree, &start, (u64)-1,
                                     thresh, EXTENT_DIRTY);
        if (num_dirty > thresh) {
                balance_dirty_pages_ratelimited_nr(
                                root->fs_info->btree_inode->i_mapping, 1);
        }
        return;
}

int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
{
        struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
        int ret;
        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret == 0) {
                buf->flags |= EXTENT_UPTODATE;
        }
        return ret;
}

static struct extent_io_ops btree_extent_io_ops = {
        .writepage_io_hook = btree_writepage_io_hook,
        .readpage_end_io_hook = btree_readpage_end_io_hook,
        .submit_bio_hook = btree_submit_bio_hook,
        /* note we're sharing with inode.c for the merge bio hook */
        .merge_bio_hook = btrfs_merge_bio_hook,
};