Btrfs: optimize btrget/set/removexattr
fs/btrfs/disk-io.c (deliverable/linux.git)
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02110-1307, USA.
17 */
18
19 #include <linux/version.h>
20 #include <linux/fs.h>
21 #include <linux/blkdev.h>
22 #include <linux/scatterlist.h>
23 #include <linux/swap.h>
24 #include <linux/radix-tree.h>
25 #include <linux/writeback.h>
26 #include <linux/buffer_head.h> /* for block_sync_page */
27 #include <linux/workqueue.h>
28 #include <linux/kthread.h>
29 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
30 # include <linux/freezer.h>
31 #else
32 # include <linux/sched.h>
33 #endif
34 #include "crc32c.h"
35 #include "ctree.h"
36 #include "disk-io.h"
37 #include "transaction.h"
38 #include "btrfs_inode.h"
39 #include "volumes.h"
40 #include "print-tree.h"
41 #include "async-thread.h"
42 #include "locking.h"
43 #include "ref-cache.h"
44
45 #if 0
46 static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
47 {
48 if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
49 printk(KERN_CRIT "buf blocknr %llu does not match header blocknr %llu\n",
50 (unsigned long long)extent_buffer_blocknr(buf),
51 (unsigned long long)btrfs_header_blocknr(buf));
52 return 1;
53 }
54 return 0;
55 }
56 #endif
57
58 static struct extent_io_ops btree_extent_io_ops;
59 static void end_workqueue_fn(struct btrfs_work *work);
60
61 struct end_io_wq {
62 struct bio *bio;
63 bio_end_io_t *end_io;
64 void *private;
65 struct btrfs_fs_info *info;
66 int error;
67 int metadata;
68 struct list_head list;
69 struct btrfs_work work;
70 };
71
72 struct async_submit_bio {
73 struct inode *inode;
74 struct bio *bio;
75 struct list_head list;
76 extent_submit_bio_hook_t *submit_bio_hook;
77 int rw;
78 int mirror_num;
79 struct btrfs_work work;
80 };
81
82 struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
83 size_t page_offset, u64 start, u64 len,
84 int create)
85 {
86 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
87 struct extent_map *em;
88 int ret;
89
90 spin_lock(&em_tree->lock);
91 em = lookup_extent_mapping(em_tree, start, len);
92 if (em) {
93 em->bdev =
94 BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
95 spin_unlock(&em_tree->lock);
96 goto out;
97 }
98 spin_unlock(&em_tree->lock);
99
100 em = alloc_extent_map(GFP_NOFS);
101 if (!em) {
102 em = ERR_PTR(-ENOMEM);
103 goto out;
104 }
105 em->start = 0;
106 em->len = (u64)-1;
107 em->block_start = 0;
108 em->bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
109
110 spin_lock(&em_tree->lock);
111 ret = add_extent_mapping(em_tree, em);
112 if (ret == -EEXIST) {
113 u64 failed_start = em->start;
114 u64 failed_len = em->len;
115
116 printk("failed to insert %Lu %Lu -> %Lu into tree\n",
117 em->start, em->len, em->block_start);
118 free_extent_map(em);
119 em = lookup_extent_mapping(em_tree, start, len);
120 if (em) {
121 printk("after failing, found %Lu %Lu %Lu\n",
122 em->start, em->len, em->block_start);
123 ret = 0;
124 } else {
125 em = lookup_extent_mapping(em_tree, failed_start,
126 failed_len);
127 if (em) {
128 printk("double failure lookup gives us "
129 "%Lu %Lu -> %Lu\n", em->start,
130 em->len, em->block_start);
131 free_extent_map(em);
132 }
133 ret = -EIO;
134 }
135 } else if (ret) {
136 free_extent_map(em);
137 em = NULL;
138 }
139 spin_unlock(&em_tree->lock);
140
141 if (ret)
142 em = ERR_PTR(ret);
143 out:
144 return em;
145 }
146
147 u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
148 {
149 return btrfs_crc32c(seed, data, len);
150 }
151
152 void btrfs_csum_final(u32 crc, char *result)
153 {
154 *(__le32 *)result = ~cpu_to_le32(crc);
155 }
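/*
 * [Editor's sketch, not part of the kernel source] A userspace model of the
 * checksum convention used by btrfs_csum_data()/btrfs_csum_final() above:
 * the CRC is seeded with ~0, updated over the payload, and the final step
 * stores the bitwise NOT as a little-endian u32. The bitwise CRC32C update
 * below is a plain reference implementation standing in for the kernel's
 * crc32c library; it is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c_update(uint32_t crc, const uint8_t *data, size_t len)
{
	while (len--) {
		crc ^= *data++;
		for (int k = 0; k < 8; k++)	/* reflected poly 0x82F63B78 */
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char buf[] = "btrfs metadata block";
	uint32_t crc = ~(uint32_t)0;		/* seed, as in csum_tree_block() */
	unsigned char result[4];

	crc = crc32c_update(crc, (const uint8_t *)buf, sizeof(buf) - 1);
	crc = ~crc;				/* btrfs_csum_final(): invert... */
	memcpy(result, &crc, sizeof(crc));	/* ...and store (LE on LE hosts) */
	printf("crc32c: %08x (%02x %02x %02x %02x on disk)\n",
	       crc, result[0], result[1], result[2], result[3]);
	return 0;
}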
156
157 static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
158 int verify)
159 {
160 char result[BTRFS_CRC32_SIZE];
161 unsigned long len;
162 unsigned long cur_len;
163 unsigned long offset = BTRFS_CSUM_SIZE;
164 char *map_token = NULL;
165 char *kaddr;
166 unsigned long map_start;
167 unsigned long map_len;
168 int err;
169 u32 crc = ~(u32)0;
170
171 len = buf->len - offset;
172 while (len > 0) {
173 err = map_private_extent_buffer(buf, offset, 32,
174 &map_token, &kaddr,
175 &map_start, &map_len, KM_USER0);
176 if (err) {
177 printk("failed to map extent buffer! %lu\n",
178 offset);
179 return 1;
180 }
181 cur_len = min(len, map_len - (offset - map_start));
182 crc = btrfs_csum_data(root, kaddr + offset - map_start,
183 crc, cur_len);
184 len -= cur_len;
185 offset += cur_len;
186 unmap_extent_buffer(buf, map_token, KM_USER0);
187 }
188 btrfs_csum_final(crc, result);
189
190 if (verify) {
191 /* FIXME, this is not good */
192 if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
193 u32 val;
194 u32 found = 0;
195 memcpy(&found, result, BTRFS_CRC32_SIZE);
196
197 read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
198 printk("btrfs: %s checksum verify failed on %llu "
199 "wanted %X found %X level %d\n",
200 root->fs_info->sb->s_id,
201 buf->start, val, found, btrfs_header_level(buf));
202 return 1;
203 }
204 } else {
205 write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
206 }
207 return 0;
208 }
209
210 static int verify_parent_transid(struct extent_io_tree *io_tree,
211 struct extent_buffer *eb, u64 parent_transid)
212 {
213 int ret;
214
215 if (!parent_transid || btrfs_header_generation(eb) == parent_transid)
216 return 0;
217
218 lock_extent(io_tree, eb->start, eb->start + eb->len - 1, GFP_NOFS);
219 if (extent_buffer_uptodate(io_tree, eb) &&
220 btrfs_header_generation(eb) == parent_transid) {
221 ret = 0;
222 goto out;
223 }
224 printk("parent transid verify failed on %llu wanted %llu found %llu\n",
225 (unsigned long long)eb->start,
226 (unsigned long long)parent_transid,
227 (unsigned long long)btrfs_header_generation(eb));
228 ret = 1;
229 clear_extent_buffer_uptodate(io_tree, eb);
230 out:
231 unlock_extent(io_tree, eb->start, eb->start + eb->len - 1,
232 GFP_NOFS);
233 return ret;
234 }
236
237 static int btree_read_extent_buffer_pages(struct btrfs_root *root,
238 struct extent_buffer *eb,
239 u64 start, u64 parent_transid)
240 {
241 struct extent_io_tree *io_tree;
242 int ret;
243 int num_copies = 0;
244 int mirror_num = 0;
245
246 io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
247 while (1) {
248 ret = read_extent_buffer_pages(io_tree, eb, start, 1,
249 btree_get_extent, mirror_num);
250 if (!ret &&
251 !verify_parent_transid(io_tree, eb, parent_transid))
252 return ret;
253
254 num_copies = btrfs_num_copies(&root->fs_info->mapping_tree,
255 eb->start, eb->len);
256 if (num_copies == 1)
257 return ret;
258
259 mirror_num++;
260 if (mirror_num > num_copies)
261 return ret;
262 }
263 return -EIO;
264 }
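/*
 * [Editor's sketch] The retry policy of btree_read_extent_buffer_pages()
 * above, in miniature: mirror 0 lets the lower layers choose a copy; on a
 * checksum or transid failure the loop then walks mirrors 1..num_copies
 * explicitly. read_copy() and verify() are stand-ins for
 * read_extent_buffer_pages() and verify_parent_transid(), both returning
 * 0 on success; they are assumptions of this sketch, not kernel APIs.
 */
static int read_with_mirror_retries(int num_copies,
				    int (*read_copy)(int mirror),
				    int (*verify)(void))
{
	int mirror = 0;

	while (1) {
		if (!read_copy(mirror) && !verify())
			return 0;		/* a good copy was found */
		if (num_copies == 1)
			return -1;		/* unmirrored: nothing to retry */
		if (++mirror > num_copies)
			return -1;		/* every copy failed */
	}
}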
265
266 int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
267 {
268 struct extent_io_tree *tree;
269 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
270 u64 found_start;
271 int found_level;
272 unsigned long len;
273 struct extent_buffer *eb;
274 int ret;
275
276 tree = &BTRFS_I(page->mapping->host)->io_tree;
277
278 if (page->private == EXTENT_PAGE_PRIVATE)
279 goto out;
280 if (!page->private)
281 goto out;
282 len = page->private >> 2;
283 if (len == 0) {
284 WARN_ON(1);
285 }
286 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
287 ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
288 btrfs_header_generation(eb));
289 BUG_ON(ret);
290 found_start = btrfs_header_bytenr(eb);
291 if (found_start != start) {
292 printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
293 start, found_start, len);
294 WARN_ON(1);
295 goto err;
296 }
297 if (eb->first_page != page) {
298 printk("bad first page %lu %lu\n", eb->first_page->index,
299 page->index);
300 WARN_ON(1);
301 goto err;
302 }
303 if (!PageUptodate(page)) {
304 printk("csum not up to date page %lu\n", page->index);
305 WARN_ON(1);
306 goto err;
307 }
308 found_level = btrfs_header_level(eb);
309 spin_lock(&root->fs_info->hash_lock);
310 btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
311 spin_unlock(&root->fs_info->hash_lock);
312 csum_tree_block(root, eb, 0);
313 err:
314 free_extent_buffer(eb);
315 out:
316 return 0;
317 }
318
319 static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
320 {
321 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
322
323 csum_dirty_buffer(root, page);
324 return 0;
325 }
326
327 int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
328 struct extent_state *state)
329 {
330 struct extent_io_tree *tree;
331 u64 found_start;
332 int found_level;
333 unsigned long len;
334 struct extent_buffer *eb;
335 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
336 int ret = 0;
337
338 tree = &BTRFS_I(page->mapping->host)->io_tree;
339 if (page->private == EXTENT_PAGE_PRIVATE)
340 goto out;
341 if (!page->private)
342 goto out;
343 len = page->private >> 2;
344 if (len == 0) {
345 WARN_ON(1);
346 }
347 eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
348
349 found_start = btrfs_header_bytenr(eb);
350 if (found_start != start) {
351 ret = -EIO;
352 goto err;
353 }
354 if (eb->first_page != page) {
355 printk("bad first page %lu %lu\n", eb->first_page->index,
356 page->index);
357 WARN_ON(1);
358 ret = -EIO;
359 goto err;
360 }
361 if (memcmp_extent_buffer(eb, root->fs_info->fsid,
362 (unsigned long)btrfs_header_fsid(eb),
363 BTRFS_FSID_SIZE)) {
364 printk("bad fsid on block %Lu\n", eb->start);
365 ret = -EIO;
366 goto err;
367 }
368 found_level = btrfs_header_level(eb);
369
370 ret = csum_tree_block(root, eb, 1);
371 if (ret)
372 ret = -EIO;
373
374 end = min_t(u64, eb->len, PAGE_CACHE_SIZE);
375 end = eb->start + end - 1;
376 err:
377 free_extent_buffer(eb);
378 out:
379 return ret;
380 }
381
382 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
383 static void end_workqueue_bio(struct bio *bio, int err)
384 #else
385 static int end_workqueue_bio(struct bio *bio,
386 unsigned int bytes_done, int err)
387 #endif
388 {
389 struct end_io_wq *end_io_wq = bio->bi_private;
390 struct btrfs_fs_info *fs_info;
391
392 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
393 if (bio->bi_size)
394 return 1;
395 #endif
396
397 fs_info = end_io_wq->info;
398 end_io_wq->error = err;
399 end_io_wq->work.func = end_workqueue_fn;
400 end_io_wq->work.flags = 0;
401 if (bio->bi_rw & (1 << BIO_RW))
402 btrfs_queue_worker(&fs_info->endio_write_workers,
403 &end_io_wq->work);
404 else
405 btrfs_queue_worker(&fs_info->endio_workers, &end_io_wq->work);
406
407 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
408 return 0;
409 #endif
410 }
411
412 int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
413 int metadata)
414 {
415 struct end_io_wq *end_io_wq;
416 end_io_wq = kmalloc(sizeof(*end_io_wq), GFP_NOFS);
417 if (!end_io_wq)
418 return -ENOMEM;
419
420 end_io_wq->private = bio->bi_private;
421 end_io_wq->end_io = bio->bi_end_io;
422 end_io_wq->info = info;
423 end_io_wq->error = 0;
424 end_io_wq->bio = bio;
425 end_io_wq->metadata = metadata;
426
427 bio->bi_private = end_io_wq;
428 bio->bi_end_io = end_workqueue_bio;
429 return 0;
430 }
431
432 unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
433 {
434 unsigned long limit = min_t(unsigned long,
435 info->workers.max_workers,
436 info->fs_devices->open_devices);
437 return 256 * limit;
438 }
439
440 int btrfs_congested_async(struct btrfs_fs_info *info, int iodone)
441 {
442 return atomic_read(&info->nr_async_bios) >
443 btrfs_async_submit_limit(info);
444 }
445
446 static void run_one_async_submit(struct btrfs_work *work)
447 {
448 struct btrfs_fs_info *fs_info;
449 struct async_submit_bio *async;
450 int limit;
451
452 async = container_of(work, struct async_submit_bio, work);
453 fs_info = BTRFS_I(async->inode)->root->fs_info;
454
455 limit = btrfs_async_submit_limit(fs_info);
456 limit = limit * 2 / 3;
457
458 atomic_dec(&fs_info->nr_async_submits);
459
460 if (atomic_read(&fs_info->nr_async_submits) < limit &&
461 waitqueue_active(&fs_info->async_submit_wait))
462 wake_up(&fs_info->async_submit_wait);
463
464 async->submit_bio_hook(async->inode, async->rw, async->bio,
465 async->mirror_num);
466 kfree(async);
467 }
468
469 int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
470 int rw, struct bio *bio, int mirror_num,
471 extent_submit_bio_hook_t *submit_bio_hook)
472 {
473 struct async_submit_bio *async;
474 int limit = btrfs_async_submit_limit(fs_info);
475
476 async = kmalloc(sizeof(*async), GFP_NOFS);
477 if (!async)
478 return -ENOMEM;
479
480 async->inode = inode;
481 async->rw = rw;
482 async->bio = bio;
483 async->mirror_num = mirror_num;
484 async->submit_bio_hook = submit_bio_hook;
485 async->work.func = run_one_async_submit;
486 async->work.flags = 0;
487 atomic_inc(&fs_info->nr_async_submits);
488 btrfs_queue_worker(&fs_info->workers, &async->work);
489
490 if (atomic_read(&fs_info->nr_async_submits) > limit) {
491 wait_event_timeout(fs_info->async_submit_wait,
492 (atomic_read(&fs_info->nr_async_submits) < limit),
493 HZ/10);
494
495 wait_event_timeout(fs_info->async_submit_wait,
496 (atomic_read(&fs_info->nr_async_bios) < limit),
497 HZ/10);
498 }
499 return 0;
500 }
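/*
 * [Editor's sketch] The backpressure scheme of btrfs_wq_submit_bio() and
 * run_one_async_submit() above, modeled with C11 atomics: producers bump a
 * counter before queueing work and block when it exceeds the limit; workers
 * decrement it and wake waiters once it drops below ~2/3 of the limit.
 * queue_work/wait_below/wake_waiters are illustrative stand-ins for
 * btrfs_queue_worker() and the async_submit_wait waitqueue.
 */
#include <stdatomic.h>

#define ASYNC_LIMIT 256

static atomic_int nr_async_submits;

static void producer_submit(void (*queue_work)(void),
			    void (*wait_below)(int threshold))
{
	atomic_fetch_add(&nr_async_submits, 1);
	queue_work();
	if (atomic_load(&nr_async_submits) > ASYNC_LIMIT)
		wait_below(ASYNC_LIMIT);	/* throttle a fast submitter */
}

static void worker_finished(void (*wake_waiters)(void))
{
	atomic_fetch_sub(&nr_async_submits, 1);
	if (atomic_load(&nr_async_submits) < ASYNC_LIMIT * 2 / 3)
		wake_waiters();			/* let submitters run again */
}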
501
502 static int __btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
503 int mirror_num)
504 {
505 struct btrfs_root *root = BTRFS_I(inode)->root;
506 u64 offset;
507 int ret;
508
509 offset = bio->bi_sector << 9;
510
511 /*
512 * when we're called for a write, we're already in the async
513 * submission context. Just jump into btrfs_map_bio
514 */
515 if (rw & (1 << BIO_RW)) {
516 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
517 mirror_num, 1);
518 }
519
520 /*
521 * called for a read, do the setup so that checksum validation
522 * can happen in the async kernel threads
523 */
524 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 1);
525 BUG_ON(ret);
526
527 return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
528 }
529
530 static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
531 int mirror_num)
532 {
533 /*
534 * kthread helpers are used to submit writes so that checksumming
535 * can happen in parallel across all CPUs
536 */
537 if (!(rw & (1 << BIO_RW))) {
538 return __btree_submit_bio_hook(inode, rw, bio, mirror_num);
539 }
540 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
541 inode, rw, bio, mirror_num,
542 __btree_submit_bio_hook);
543 }
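/*
 * [Editor's sketch] The submit split implemented by btree_submit_bio_hook()
 * above: writes are bounced through the worker pool so checksumming runs in
 * parallel across CPUs, while reads go straight down (their checksums are
 * verified later, at end_io time). The function pointers stand in for
 * btrfs_wq_submit_bio() and __btree_submit_bio_hook().
 */
static int submit_metadata_bio(int is_write,
			       int (*submit_async)(void),
			       int (*submit_direct)(void))
{
	return is_write ? submit_async() : submit_direct();
}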
544
545 static int btree_writepage(struct page *page, struct writeback_control *wbc)
546 {
547 struct extent_io_tree *tree;
548 tree = &BTRFS_I(page->mapping->host)->io_tree;
549
550 if (current->flags & PF_MEMALLOC) {
551 redirty_page_for_writepage(wbc, page);
552 unlock_page(page);
553 return 0;
554 }
555 return extent_write_full_page(tree, page, btree_get_extent, wbc);
556 }
557
558 static int btree_writepages(struct address_space *mapping,
559 struct writeback_control *wbc)
560 {
561 struct extent_io_tree *tree;
562 tree = &BTRFS_I(mapping->host)->io_tree;
563 if (wbc->sync_mode == WB_SYNC_NONE) {
564 u64 num_dirty;
565 u64 start = 0;
566 unsigned long thresh = 8 * 1024 * 1024;
567
568 if (wbc->for_kupdate)
569 return 0;
570
571 num_dirty = count_range_bits(tree, &start, (u64)-1,
572 thresh, EXTENT_DIRTY);
573 if (num_dirty < thresh) {
574 return 0;
575 }
576 }
577 return extent_writepages(tree, mapping, btree_get_extent, wbc);
578 }
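/*
 * [Editor's sketch] The gating used by btree_writepages() above: kupdate
 * passes are skipped outright, and other background (WB_SYNC_NONE) flushes
 * are deferred until at least 8MB of metadata is dirty, so tree blocks get
 * batched into larger, better-ordered IO. should_flush_metadata() is an
 * illustrative helper, not a kernel API.
 */
static int should_flush_metadata(int for_sync, int for_kupdate,
				 unsigned long long dirty_bytes)
{
	const unsigned long long thresh = 8ULL * 1024 * 1024;	/* 8MB */

	if (for_sync)
		return 1;		/* sync writeback always proceeds */
	if (for_kupdate)
		return 0;		/* periodic kupdate flushes are skipped */
	return dirty_bytes >= thresh;	/* batch background writeback */
}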
579
580 int btree_readpage(struct file *file, struct page *page)
581 {
582 struct extent_io_tree *tree;
583 tree = &BTRFS_I(page->mapping->host)->io_tree;
584 return extent_read_full_page(tree, page, btree_get_extent);
585 }
586
587 static int btree_releasepage(struct page *page, gfp_t gfp_flags)
588 {
589 struct extent_io_tree *tree;
590 struct extent_map_tree *map;
591 int ret;
592
593 tree = &BTRFS_I(page->mapping->host)->io_tree;
594 map = &BTRFS_I(page->mapping->host)->extent_tree;
595
596 ret = try_release_extent_state(map, tree, page, gfp_flags);
597 if (!ret) {
598 return 0;
599 }
600
601 ret = try_release_extent_buffer(tree, page);
602 if (ret == 1) {
603 ClearPagePrivate(page);
604 set_page_private(page, 0);
605 page_cache_release(page);
606 }
607
608 return ret;
609 }
610
611 static void btree_invalidatepage(struct page *page, unsigned long offset)
612 {
613 struct extent_io_tree *tree;
614 tree = &BTRFS_I(page->mapping->host)->io_tree;
615 extent_invalidatepage(tree, page, offset);
616 btree_releasepage(page, GFP_NOFS);
617 if (PagePrivate(page)) {
618 printk("warning page private not zero on page %Lu\n",
619 page_offset(page));
620 ClearPagePrivate(page);
621 set_page_private(page, 0);
622 page_cache_release(page);
623 }
624 }
625
626 #if 0
627 static int btree_writepage(struct page *page, struct writeback_control *wbc)
628 {
629 struct buffer_head *bh;
630 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
631 struct buffer_head *head;
632 if (!page_has_buffers(page)) {
633 create_empty_buffers(page, root->fs_info->sb->s_blocksize,
634 (1 << BH_Dirty)|(1 << BH_Uptodate));
635 }
636 head = page_buffers(page);
637 bh = head;
638 do {
639 if (buffer_dirty(bh))
640 csum_tree_block(root, bh, 0);
641 bh = bh->b_this_page;
642 } while (bh != head);
643 return block_write_full_page(page, btree_get_block, wbc);
644 }
645 #endif
646
647 static struct address_space_operations btree_aops = {
648 .readpage = btree_readpage,
649 .writepage = btree_writepage,
650 .writepages = btree_writepages,
651 .releasepage = btree_releasepage,
652 .invalidatepage = btree_invalidatepage,
653 .sync_page = block_sync_page,
654 };
655
656 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
657 u64 parent_transid)
658 {
659 struct extent_buffer *buf = NULL;
660 struct inode *btree_inode = root->fs_info->btree_inode;
661 int ret = 0;
662
663 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
664 if (!buf)
665 return 0;
666 read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
667 buf, 0, 0, btree_get_extent, 0);
668 free_extent_buffer(buf);
669 return ret;
670 }
671
672 struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
673 u64 bytenr, u32 blocksize)
674 {
675 struct inode *btree_inode = root->fs_info->btree_inode;
676 struct extent_buffer *eb;
677 eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
678 bytenr, blocksize, GFP_NOFS);
679 return eb;
680 }
681
682 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
683 u64 bytenr, u32 blocksize)
684 {
685 struct inode *btree_inode = root->fs_info->btree_inode;
686 struct extent_buffer *eb;
687
688 eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
689 bytenr, blocksize, NULL, GFP_NOFS);
690 return eb;
691 }
692
693
694 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
695 u32 blocksize, u64 parent_transid)
696 {
697 struct extent_buffer *buf = NULL;
698 struct inode *btree_inode = root->fs_info->btree_inode;
699 struct extent_io_tree *io_tree;
700 int ret;
701
702 io_tree = &BTRFS_I(btree_inode)->io_tree;
703
704 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
705 if (!buf)
706 return NULL;
707
708 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
709
710 if (ret == 0) {
711 buf->flags |= EXTENT_UPTODATE;
712 }
713 return buf;
714 }
716
717 int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
718 struct extent_buffer *buf)
719 {
720 struct inode *btree_inode = root->fs_info->btree_inode;
721 if (btrfs_header_generation(buf) ==
722 root->fs_info->running_transaction->transid) {
723 WARN_ON(!btrfs_tree_locked(buf));
724 clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
725 buf);
726 }
727 return 0;
728 }
729
730 int wait_on_tree_block_writeback(struct btrfs_root *root,
731 struct extent_buffer *buf)
732 {
733 struct inode *btree_inode = root->fs_info->btree_inode;
734 wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
735 buf);
736 return 0;
737 }
738
739 static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
740 u32 stripesize, struct btrfs_root *root,
741 struct btrfs_fs_info *fs_info,
742 u64 objectid)
743 {
744 root->node = NULL;
745 root->inode = NULL;
746 root->commit_root = NULL;
747 root->ref_tree = NULL;
748 root->sectorsize = sectorsize;
749 root->nodesize = nodesize;
750 root->leafsize = leafsize;
751 root->stripesize = stripesize;
752 root->ref_cows = 0;
753 root->track_dirty = 0;
754
755 root->fs_info = fs_info;
756 root->objectid = objectid;
757 root->last_trans = 0;
758 root->highest_inode = 0;
759 root->last_inode_alloc = 0;
760 root->name = NULL;
761 root->in_sysfs = 0;
762
763 INIT_LIST_HEAD(&root->dirty_list);
764 INIT_LIST_HEAD(&root->orphan_list);
765 INIT_LIST_HEAD(&root->dead_list);
766 spin_lock_init(&root->node_lock);
767 spin_lock_init(&root->list_lock);
768 mutex_init(&root->objectid_mutex);
769
770 btrfs_leaf_ref_tree_init(&root->ref_tree_struct);
771 root->ref_tree = &root->ref_tree_struct;
772
773 memset(&root->root_key, 0, sizeof(root->root_key));
774 memset(&root->root_item, 0, sizeof(root->root_item));
775 memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
776 memset(&root->root_kobj, 0, sizeof(root->root_kobj));
777 root->defrag_trans_start = fs_info->generation;
778 init_completion(&root->kobj_unregister);
779 root->defrag_running = 0;
780 root->defrag_level = 0;
781 root->root_key.objectid = objectid;
782 return 0;
783 }
784
785 static int find_and_setup_root(struct btrfs_root *tree_root,
786 struct btrfs_fs_info *fs_info,
787 u64 objectid,
788 struct btrfs_root *root)
789 {
790 int ret;
791 u32 blocksize;
792
793 __setup_root(tree_root->nodesize, tree_root->leafsize,
794 tree_root->sectorsize, tree_root->stripesize,
795 root, fs_info, objectid);
796 ret = btrfs_find_last_root(tree_root, objectid,
797 &root->root_item, &root->root_key);
798 BUG_ON(ret);
799
800 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
801 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
802 blocksize, 0);
803 BUG_ON(!root->node);
804 return 0;
805 }
806
807 struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
808 struct btrfs_key *location)
809 {
810 struct btrfs_root *root;
811 struct btrfs_root *tree_root = fs_info->tree_root;
812 struct btrfs_path *path;
813 struct extent_buffer *l;
814 u64 highest_inode;
815 u32 blocksize;
816 int ret = 0;
817
818 root = kzalloc(sizeof(*root), GFP_NOFS);
819 if (!root)
820 return ERR_PTR(-ENOMEM);
821 if (location->offset == (u64)-1) {
822 ret = find_and_setup_root(tree_root, fs_info,
823 location->objectid, root);
824 if (ret) {
825 kfree(root);
826 return ERR_PTR(ret);
827 }
828 goto insert;
829 }
830
831 __setup_root(tree_root->nodesize, tree_root->leafsize,
832 tree_root->sectorsize, tree_root->stripesize,
833 root, fs_info, location->objectid);
834
835 path = btrfs_alloc_path();
836 BUG_ON(!path);
837 ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
838 if (ret != 0) {
839 if (ret > 0)
840 ret = -ENOENT;
841 goto out;
842 }
843 l = path->nodes[0];
844 read_extent_buffer(l, &root->root_item,
845 btrfs_item_ptr_offset(l, path->slots[0]),
846 sizeof(root->root_item));
847 memcpy(&root->root_key, location, sizeof(*location));
848 ret = 0;
849 out:
850 btrfs_release_path(root, path);
851 btrfs_free_path(path);
852 if (ret) {
853 kfree(root);
854 return ERR_PTR(ret);
855 }
856 blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
857 root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
858 blocksize, 0);
859 BUG_ON(!root->node);
860 insert:
861 root->ref_cows = 1;
862 ret = btrfs_find_highest_inode(root, &highest_inode);
863 if (ret == 0) {
864 root->highest_inode = highest_inode;
865 root->last_inode_alloc = highest_inode;
866 }
867 return root;
868 }
869
870 struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
871 u64 root_objectid)
872 {
873 struct btrfs_root *root;
874
875 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
876 return fs_info->tree_root;
877 if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
878 return fs_info->extent_root;
879
880 root = radix_tree_lookup(&fs_info->fs_roots_radix,
881 (unsigned long)root_objectid);
882 return root;
883 }
884
885 struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
886 struct btrfs_key *location)
887 {
888 struct btrfs_root *root;
889 int ret;
890
891 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
892 return fs_info->tree_root;
893 if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
894 return fs_info->extent_root;
895 if (location->objectid == BTRFS_CHUNK_TREE_OBJECTID)
896 return fs_info->chunk_root;
897 if (location->objectid == BTRFS_DEV_TREE_OBJECTID)
898 return fs_info->dev_root;
899
900 root = radix_tree_lookup(&fs_info->fs_roots_radix,
901 (unsigned long)location->objectid);
902 if (root)
903 return root;
904
905 root = btrfs_read_fs_root_no_radix(fs_info, location);
906 if (IS_ERR(root))
907 return root;
908 ret = radix_tree_insert(&fs_info->fs_roots_radix,
909 (unsigned long)root->root_key.objectid,
910 root);
911 if (ret) {
912 free_extent_buffer(root->node);
913 kfree(root);
914 return ERR_PTR(ret);
915 }
916 ret = btrfs_find_dead_roots(fs_info->tree_root,
917 root->root_key.objectid, root);
918 BUG_ON(ret);
919
920 return root;
921 }
922
923 struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
924 struct btrfs_key *location,
925 const char *name, int namelen)
926 {
927 struct btrfs_root *root;
928 int ret;
929
930 root = btrfs_read_fs_root_no_name(fs_info, location);
931 if (!root)
932 return NULL;
933
934 if (root->in_sysfs)
935 return root;
936
937 ret = btrfs_set_root_name(root, name, namelen);
938 if (ret) {
939 free_extent_buffer(root->node);
940 kfree(root);
941 return ERR_PTR(ret);
942 }
943
944 ret = btrfs_sysfs_add_root(root);
945 if (ret) {
946 free_extent_buffer(root->node);
947 kfree(root->name);
948 kfree(root);
949 return ERR_PTR(ret);
950 }
951 root->in_sysfs = 1;
952 return root;
953 }
954 #if 0
955 static int add_hasher(struct btrfs_fs_info *info, char *type) {
956 struct btrfs_hasher *hasher;
957
958 hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
959 if (!hasher)
960 return -ENOMEM;
961 hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
962 if (!hasher->hash_tfm) {
963 kfree(hasher);
964 return -EINVAL;
965 }
966 spin_lock(&info->hash_lock);
967 list_add(&hasher->list, &info->hashers);
968 spin_unlock(&info->hash_lock);
969 return 0;
970 }
971 #endif
972
973 static int btrfs_congested_fn(void *congested_data, int bdi_bits)
974 {
975 struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
976 int ret = 0;
977 struct list_head *cur;
978 struct btrfs_device *device;
979 struct backing_dev_info *bdi;
980
981 if ((bdi_bits & (1 << BDI_write_congested)) &&
982 btrfs_congested_async(info, 0))
983 return 1;
984
985 list_for_each(cur, &info->fs_devices->devices) {
986 device = list_entry(cur, struct btrfs_device, dev_list);
987 if (!device->bdev)
988 continue;
989 bdi = blk_get_backing_dev_info(device->bdev);
990 if (bdi && bdi_congested(bdi, bdi_bits)) {
991 ret = 1;
992 break;
993 }
994 }
995 return ret;
996 }
997
998 /*
999 * this unplugs every device on the box, and it is only used when page
1000 * is null
1001 */
1002 static void __unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1003 {
1004 struct list_head *cur;
1005 struct btrfs_device *device;
1006 struct btrfs_fs_info *info;
1007
1008 info = (struct btrfs_fs_info *)bdi->unplug_io_data;
1009 list_for_each(cur, &info->fs_devices->devices) {
1010 device = list_entry(cur, struct btrfs_device, dev_list);
1011 bdi = blk_get_backing_dev_info(device->bdev);
1012 if (bdi->unplug_io_fn) {
1013 bdi->unplug_io_fn(bdi, page);
1014 }
1015 }
1016 }
1017
1018 void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
1019 {
1020 struct inode *inode;
1021 struct extent_map_tree *em_tree;
1022 struct extent_map *em;
1023 struct address_space *mapping;
1024 u64 offset;
1025
1026 /* the generic O_DIRECT read code does this */
1027 if (!page) {
1028 __unplug_io_fn(bdi, page);
1029 return;
1030 }
1031
1032 /*
1033 * page->mapping may change at any time. Get a consistent copy
1034 * and use that for everything below
1035 */
1036 smp_mb();
1037 mapping = page->mapping;
1038 if (!mapping)
1039 return;
1040
1041 inode = mapping->host;
1042 offset = page_offset(page);
1043
1044 em_tree = &BTRFS_I(inode)->extent_tree;
1045 spin_lock(&em_tree->lock);
1046 em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE);
1047 spin_unlock(&em_tree->lock);
1048 if (!em) {
1049 __unplug_io_fn(bdi, page);
1050 return;
1051 }
1052
1053 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
1054 free_extent_map(em);
1055 __unplug_io_fn(bdi, page);
1056 return;
1057 }
1058 offset = offset - em->start;
1059 btrfs_unplug_page(&BTRFS_I(inode)->root->fs_info->mapping_tree,
1060 em->block_start + offset, page);
1061 free_extent_map(em);
1062 }
1063
1064 static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
1065 {
1066 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1067 bdi_init(bdi);
1068 #endif
1069 bdi->ra_pages = default_backing_dev_info.ra_pages;
1070 bdi->state = 0;
1071 bdi->capabilities = default_backing_dev_info.capabilities;
1072 bdi->unplug_io_fn = btrfs_unplug_io_fn;
1073 bdi->unplug_io_data = info;
1074 bdi->congested_fn = btrfs_congested_fn;
1075 bdi->congested_data = info;
1076 return 0;
1077 }
1078
1079 static int bio_ready_for_csum(struct bio *bio)
1080 {
1081 u64 length = 0;
1082 u64 buf_len = 0;
1083 u64 start = 0;
1084 struct page *page;
1085 struct extent_io_tree *io_tree = NULL;
1086 struct btrfs_fs_info *info = NULL;
1087 struct bio_vec *bvec;
1088 int i;
1089 int ret;
1090
1091 bio_for_each_segment(bvec, bio, i) {
1092 page = bvec->bv_page;
1093 if (page->private == EXTENT_PAGE_PRIVATE) {
1094 length += bvec->bv_len;
1095 continue;
1096 }
1097 if (!page->private) {
1098 length += bvec->bv_len;
1099 continue;
1100 }
1101 length = bvec->bv_len;
1102 buf_len = page->private >> 2;
1103 start = page_offset(page) + bvec->bv_offset;
1104 io_tree = &BTRFS_I(page->mapping->host)->io_tree;
1105 info = BTRFS_I(page->mapping->host)->root->fs_info;
1106 }
1107 /* are we fully contained in this bio? */
1108 if (buf_len <= length)
1109 return 1;
1110
1111 ret = extent_range_uptodate(io_tree, start + length,
1112 start + buf_len - 1);
1113 return ret;
1116 }
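/*
 * [Editor's sketch] The containment test of bio_ready_for_csum() above: a
 * tree block can only be checksummed once every byte is in RAM, so the bio
 * is "ready" either when it carries the whole block or when the page cache
 * already holds the rest of the range up to date. Parameter names are
 * illustrative.
 */
static int block_ready_for_csum(unsigned long long block_len,
				unsigned long long bytes_in_bio,
				int rest_uptodate)
{
	if (block_len <= bytes_in_bio)
		return 1;		/* the bio covers the whole block */
	return rest_uptodate;		/* otherwise fall back to cache state */
}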
1117
1118 /*
1119 * called by the kthread helper functions to finally call the bio end_io
1120 * functions. This is where read checksum verification actually happens
1121 */
1122 static void end_workqueue_fn(struct btrfs_work *work)
1123 {
1124 struct bio *bio;
1125 struct end_io_wq *end_io_wq;
1126 struct btrfs_fs_info *fs_info;
1127 int error;
1128
1129 end_io_wq = container_of(work, struct end_io_wq, work);
1130 bio = end_io_wq->bio;
1131 fs_info = end_io_wq->info;
1132
1133 /* metadata bios are special because the whole tree block must
1134 * be checksummed at once. This makes sure the entire block is in
1135 * ram and up to date before trying to verify things. For
1136 * blocksize <= pagesize, it is basically a noop
1137 */
1138 if (end_io_wq->metadata && !bio_ready_for_csum(bio)) {
1139 btrfs_queue_worker(&fs_info->endio_workers,
1140 &end_io_wq->work);
1141 return;
1142 }
1143 error = end_io_wq->error;
1144 bio->bi_private = end_io_wq->private;
1145 bio->bi_end_io = end_io_wq->end_io;
1146 kfree(end_io_wq);
1147 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1148 bio_endio(bio, bio->bi_size, error);
1149 #else
1150 bio_endio(bio, error);
1151 #endif
1152 }
1153
1154 static int cleaner_kthread(void *arg)
1155 {
1156 struct btrfs_root *root = arg;
1157
1158 do {
1159 smp_mb();
1160 if (root->fs_info->closing)
1161 break;
1162
1163 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1164 mutex_lock(&root->fs_info->cleaner_mutex);
1165 btrfs_clean_old_snapshots(root);
1166 mutex_unlock(&root->fs_info->cleaner_mutex);
1167
1168 if (freezing(current)) {
1169 refrigerator();
1170 } else {
1171 smp_mb();
1172 if (root->fs_info->closing)
1173 break;
1174 set_current_state(TASK_INTERRUPTIBLE);
1175 schedule();
1176 __set_current_state(TASK_RUNNING);
1177 }
1178 } while (!kthread_should_stop());
1179 return 0;
1180 }
1181
1182 static int transaction_kthread(void *arg)
1183 {
1184 struct btrfs_root *root = arg;
1185 struct btrfs_trans_handle *trans;
1186 struct btrfs_transaction *cur;
1187 unsigned long now;
1188 unsigned long delay;
1189 int ret;
1190
1191 do {
1192 smp_mb();
1193 if (root->fs_info->closing)
1194 break;
1195
1196 delay = HZ * 30;
1197 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1198 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1199
1200 if (root->fs_info->total_ref_cache_size > 20 * 1024 * 1024) {
1201 printk("btrfs: total reference cache size %Lu\n",
1202 root->fs_info->total_ref_cache_size);
1203 }
1204
1205 mutex_lock(&root->fs_info->trans_mutex);
1206 cur = root->fs_info->running_transaction;
1207 if (!cur) {
1208 mutex_unlock(&root->fs_info->trans_mutex);
1209 goto sleep;
1210 }
1211
1212 now = get_seconds();
1213 if (now < cur->start_time || now - cur->start_time < 30) {
1214 mutex_unlock(&root->fs_info->trans_mutex);
1215 delay = HZ * 5;
1216 goto sleep;
1217 }
1218 mutex_unlock(&root->fs_info->trans_mutex);
1219 trans = btrfs_start_transaction(root, 1);
1220 ret = btrfs_commit_transaction(trans, root);
1221 sleep:
1222 wake_up_process(root->fs_info->cleaner_kthread);
1223 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
1224
1225 if (freezing(current)) {
1226 refrigerator();
1227 } else {
1228 if (root->fs_info->closing)
1229 break;
1230 set_current_state(TASK_INTERRUPTIBLE);
1231 schedule_timeout(delay);
1232 __set_current_state(TASK_RUNNING);
1233 }
1234 } while (!kthread_should_stop());
1235 return 0;
1236 }
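/*
 * [Editor's sketch] The commit cadence of transaction_kthread() above:
 * poll every 30 seconds by default, but if the running transaction is
 * younger than 30 seconds, come back in 5 seconds rather than committing
 * early. The helper returns the next sleep in seconds (0 = commit now)
 * and is illustrative, not a kernel API.
 */
static unsigned int commit_poll_seconds(long now, long txn_start, int have_txn)
{
	if (!have_txn)
		return 30;	/* no open transaction: normal poll interval */
	if (now < txn_start || now - txn_start < 30)
		return 5;	/* transaction still young: re-check sooner */
	return 0;		/* old enough: commit on this pass */
}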
1237
1238 struct btrfs_root *open_ctree(struct super_block *sb,
1239 struct btrfs_fs_devices *fs_devices,
1240 char *options)
1241 {
1242 u32 sectorsize;
1243 u32 nodesize;
1244 u32 leafsize;
1245 u32 blocksize;
1246 u32 stripesize;
1247 struct buffer_head *bh;
1248 struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
1249 GFP_NOFS);
1250 struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
1251 GFP_NOFS);
1252 struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
1253 GFP_NOFS);
1254 struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
1255 GFP_NOFS);
1256 struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
1257 GFP_NOFS);
1258 int ret;
1259 int err = -EINVAL;
1260
1261 struct btrfs_super_block *disk_super;
1262
1263 if (!extent_root || !tree_root || !fs_info) {
1264 err = -ENOMEM;
1265 goto fail;
1266 }
1267 INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
1268 INIT_LIST_HEAD(&fs_info->trans_list);
1269 INIT_LIST_HEAD(&fs_info->dead_roots);
1270 INIT_LIST_HEAD(&fs_info->hashers);
1271 INIT_LIST_HEAD(&fs_info->delalloc_inodes);
1272 spin_lock_init(&fs_info->hash_lock);
1273 spin_lock_init(&fs_info->delalloc_lock);
1274 spin_lock_init(&fs_info->new_trans_lock);
1275 spin_lock_init(&fs_info->ref_cache_lock);
1276
1277 init_completion(&fs_info->kobj_unregister);
1278 fs_info->tree_root = tree_root;
1279 fs_info->extent_root = extent_root;
1280 fs_info->chunk_root = chunk_root;
1281 fs_info->dev_root = dev_root;
1282 fs_info->fs_devices = fs_devices;
1283 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1284 INIT_LIST_HEAD(&fs_info->space_info);
1285 btrfs_mapping_init(&fs_info->mapping_tree);
1286 atomic_set(&fs_info->nr_async_submits, 0);
1287 atomic_set(&fs_info->nr_async_bios, 0);
1288 atomic_set(&fs_info->throttles, 0);
1289 atomic_set(&fs_info->throttle_gen, 0);
1290 fs_info->sb = sb;
1291 fs_info->max_extent = (u64)-1;
1292 fs_info->max_inline = 8192 * 1024;
1293 setup_bdi(fs_info, &fs_info->bdi);
1294 fs_info->btree_inode = new_inode(sb);
1295 fs_info->btree_inode->i_ino = 1;
1296 fs_info->btree_inode->i_nlink = 1;
1297 fs_info->thread_pool_size = min(num_online_cpus() + 2, 8);
1298
1299 INIT_LIST_HEAD(&fs_info->ordered_extents);
1300 spin_lock_init(&fs_info->ordered_extent_lock);
1301
1302 sb->s_blocksize = 4096;
1303 sb->s_blocksize_bits = blksize_bits(4096);
1304
1305 /*
1306 * we set the i_size on the btree inode to the max possible offset.
1307 * the real end of the address space is determined by all of
1308 * the devices in the system
1309 */
1310 fs_info->btree_inode->i_size = OFFSET_MAX;
1311 fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
1312 fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;
1313
1314 extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
1315 fs_info->btree_inode->i_mapping,
1316 GFP_NOFS);
1317 extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
1318 GFP_NOFS);
1319
1320 BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;
1321
1322 extent_io_tree_init(&fs_info->free_space_cache,
1323 fs_info->btree_inode->i_mapping, GFP_NOFS);
1324 extent_io_tree_init(&fs_info->block_group_cache,
1325 fs_info->btree_inode->i_mapping, GFP_NOFS);
1326 extent_io_tree_init(&fs_info->pinned_extents,
1327 fs_info->btree_inode->i_mapping, GFP_NOFS);
1328 extent_io_tree_init(&fs_info->pending_del,
1329 fs_info->btree_inode->i_mapping, GFP_NOFS);
1330 extent_io_tree_init(&fs_info->extent_ins,
1331 fs_info->btree_inode->i_mapping, GFP_NOFS);
1332 fs_info->do_barriers = 1;
1333
1334 BTRFS_I(fs_info->btree_inode)->root = tree_root;
1335 memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
1336 sizeof(struct btrfs_key));
1337 insert_inode_hash(fs_info->btree_inode);
1338 mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
1339
1340 mutex_init(&fs_info->trans_mutex);
1341 mutex_init(&fs_info->drop_mutex);
1342 mutex_init(&fs_info->alloc_mutex);
1343 mutex_init(&fs_info->chunk_mutex);
1344 mutex_init(&fs_info->transaction_kthread_mutex);
1345 mutex_init(&fs_info->cleaner_mutex);
1346 mutex_init(&fs_info->volume_mutex);
1347 init_waitqueue_head(&fs_info->transaction_throttle);
1348 init_waitqueue_head(&fs_info->transaction_wait);
1349 init_waitqueue_head(&fs_info->async_submit_wait);
1350
1351 #if 0
1352 ret = add_hasher(fs_info, "crc32c");
1353 if (ret) {
1354 printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
1355 err = -ENOMEM;
1356 goto fail_iput;
1357 }
1358 #endif
1359 __setup_root(4096, 4096, 4096, 4096, tree_root,
1360 fs_info, BTRFS_ROOT_TREE_OBJECTID);
1361
1362
1363 bh = __bread(fs_devices->latest_bdev,
1364 BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
1365 if (!bh)
1366 goto fail_iput;
1367
1368 memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy));
1369 brelse(bh);
1370
1371 memcpy(fs_info->fsid, fs_info->super_copy.fsid, BTRFS_FSID_SIZE);
1372
1373 disk_super = &fs_info->super_copy;
1374 if (!btrfs_super_root(disk_super))
1375 goto fail_sb_buffer;
1376
1377 err = btrfs_parse_options(tree_root, options);
1378 if (err)
1379 goto fail_sb_buffer;
1380
1381 /*
1382 * we need to start all the end_io workers up front because the
1383 * queue work function gets called at interrupt time, and so it
1384 * cannot dynamically grow.
1385 */
1386 btrfs_init_workers(&fs_info->workers, "worker",
1387 fs_info->thread_pool_size);
1388 btrfs_init_workers(&fs_info->submit_workers, "submit",
1389 min_t(u64, fs_devices->num_devices,
1390 fs_info->thread_pool_size));
1391
1392 /* a higher idle thresh on the submit workers makes it much more
1393 * likely that bios will be sent down in a sane order to the
1394 * devices
1395 */
1396 fs_info->submit_workers.idle_thresh = 64;
1397
1398 /* fs_info->workers is responsible for checksumming file data
1399 * blocks and metadata. Using a larger idle thresh allows each
1400 * worker thread to operate on things in roughly the order they
1401 * were sent by the writeback daemons, improving overall locality
1402 * of the IO going down the pipe.
1403 */
1404 fs_info->workers.idle_thresh = 128;
1405
1406 btrfs_init_workers(&fs_info->fixup_workers, "fixup", 1);
1407 btrfs_init_workers(&fs_info->endio_workers, "endio",
1408 fs_info->thread_pool_size);
1409 btrfs_init_workers(&fs_info->endio_write_workers, "endio-write",
1410 fs_info->thread_pool_size);
1411
1412 /*
1413 * endios are largely parallel and should have a very
1414 * low idle thresh
1415 */
1416 fs_info->endio_workers.idle_thresh = 4;
1417 fs_info->endio_write_workers.idle_thresh = 4;
1418
1419 btrfs_start_workers(&fs_info->workers, 1);
1420 btrfs_start_workers(&fs_info->submit_workers, 1);
1421 btrfs_start_workers(&fs_info->fixup_workers, 1);
1422 btrfs_start_workers(&fs_info->endio_workers, fs_info->thread_pool_size);
1423 btrfs_start_workers(&fs_info->endio_write_workers,
1424 fs_info->thread_pool_size);
1425
1426 err = -EINVAL;
1427 if (btrfs_super_num_devices(disk_super) > fs_devices->open_devices) {
1428 printk("Btrfs: wanted %llu devices, but found %llu\n",
1429 (unsigned long long)btrfs_super_num_devices(disk_super),
1430 (unsigned long long)fs_devices->open_devices);
1431 if (btrfs_test_opt(tree_root, DEGRADED))
1432 printk("continuing in degraded mode\n");
1433 else {
1434 goto fail_sb_buffer;
1435 }
1436 }
1437
1438 fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
1439
1440 nodesize = btrfs_super_nodesize(disk_super);
1441 leafsize = btrfs_super_leafsize(disk_super);
1442 sectorsize = btrfs_super_sectorsize(disk_super);
1443 stripesize = btrfs_super_stripesize(disk_super);
1444 tree_root->nodesize = nodesize;
1445 tree_root->leafsize = leafsize;
1446 tree_root->sectorsize = sectorsize;
1447 tree_root->stripesize = stripesize;
1448
1449 sb->s_blocksize = sectorsize;
1450 sb->s_blocksize_bits = blksize_bits(sectorsize);
1451
1452 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
1453 sizeof(disk_super->magic))) {
1454 printk("btrfs: valid FS not found on %s\n", sb->s_id);
1455 goto fail_sb_buffer;
1456 }
1457
1458 mutex_lock(&fs_info->chunk_mutex);
1459 ret = btrfs_read_sys_array(tree_root);
1460 mutex_unlock(&fs_info->chunk_mutex);
1461 if (ret) {
1462 printk("btrfs: failed to read the system array on %s\n",
1463 sb->s_id);
1464 goto fail_sys_array;
1465 }
1466
1467 blocksize = btrfs_level_size(tree_root,
1468 btrfs_super_chunk_root_level(disk_super));
1469
1470 __setup_root(nodesize, leafsize, sectorsize, stripesize,
1471 chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);
1472
1473 chunk_root->node = read_tree_block(chunk_root,
1474 btrfs_super_chunk_root(disk_super),
1475 blocksize, 0);
1476 BUG_ON(!chunk_root->node);
1477
1478 read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid,
1479 (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
1480 BTRFS_UUID_SIZE);
1481
1482 mutex_lock(&fs_info->chunk_mutex);
1483 ret = btrfs_read_chunk_tree(chunk_root);
1484 mutex_unlock(&fs_info->chunk_mutex);
1485 BUG_ON(ret);
1486
1487 btrfs_close_extra_devices(fs_devices);
1488
1489 blocksize = btrfs_level_size(tree_root,
1490 btrfs_super_root_level(disk_super));
1491
1492
1493 tree_root->node = read_tree_block(tree_root,
1494 btrfs_super_root(disk_super),
1495 blocksize, 0);
1496 if (!tree_root->node)
1497 goto fail_sb_buffer;
1498
1499
1500 ret = find_and_setup_root(tree_root, fs_info,
1501 BTRFS_EXTENT_TREE_OBJECTID, extent_root);
1502 if (ret)
1503 goto fail_tree_root;
1504 extent_root->track_dirty = 1;
1505
1506 ret = find_and_setup_root(tree_root, fs_info,
1507 BTRFS_DEV_TREE_OBJECTID, dev_root);
1508 if (ret)
1509 goto fail_extent_root;
1510 
1511 dev_root->track_dirty = 1;
1512
1513 btrfs_read_block_groups(extent_root);
1514
1515 fs_info->generation = btrfs_super_generation(disk_super) + 1;
1516 fs_info->data_alloc_profile = (u64)-1;
1517 fs_info->metadata_alloc_profile = (u64)-1;
1518 fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
1519 fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
1520 "btrfs-cleaner");
1521 if (IS_ERR(fs_info->cleaner_kthread))
1522 goto fail_extent_root;
1523
1524 fs_info->transaction_kthread = kthread_run(transaction_kthread,
1525 tree_root,
1526 "btrfs-transaction");
1527 if (IS_ERR(fs_info->transaction_kthread))
1528 goto fail_cleaner;
1529
1530
1531 return tree_root;
1532
1533 fail_cleaner:
1534 kthread_stop(fs_info->cleaner_kthread);
1535 fail_extent_root:
1536 free_extent_buffer(extent_root->node);
1537 fail_tree_root:
1538 free_extent_buffer(tree_root->node);
1539 fail_sys_array:
1540 fail_sb_buffer:
1541 btrfs_stop_workers(&fs_info->fixup_workers);
1542 btrfs_stop_workers(&fs_info->workers);
1543 btrfs_stop_workers(&fs_info->endio_workers);
1544 btrfs_stop_workers(&fs_info->endio_write_workers);
1545 btrfs_stop_workers(&fs_info->submit_workers);
1546 fail_iput:
1547 iput(fs_info->btree_inode);
1548 fail:
1549 btrfs_close_devices(fs_info->fs_devices);
1550 btrfs_mapping_tree_free(&fs_info->mapping_tree);
1551
1552 kfree(extent_root);
1553 kfree(tree_root);
1554 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1555 bdi_destroy(&fs_info->bdi);
1556 #endif
1557 kfree(fs_info);
1558 return ERR_PTR(err);
1559 }
1560
1561 static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
1562 {
1563 char b[BDEVNAME_SIZE];
1564
1565 if (uptodate) {
1566 set_buffer_uptodate(bh);
1567 } else {
1568 if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
1569 printk(KERN_WARNING "lost page write due to "
1570 "I/O error on %s\n",
1571 bdevname(bh->b_bdev, b));
1572 }
1573 /* note, we don't set_buffer_write_io_error because we have
1574 * our own ways of dealing with the IO errors
1575 */
1576 clear_buffer_uptodate(bh);
1577 }
1578 unlock_buffer(bh);
1579 put_bh(bh);
1580 }
1581
1582 int write_all_supers(struct btrfs_root *root)
1583 {
1584 struct list_head *cur;
1585 struct list_head *head = &root->fs_info->fs_devices->devices;
1586 struct btrfs_device *dev;
1587 struct btrfs_super_block *sb;
1588 struct btrfs_dev_item *dev_item;
1589 struct buffer_head *bh;
1590 int ret;
1591 int do_barriers;
1592 int max_errors;
1593 int total_errors = 0;
1594 u32 crc;
1595 u64 flags;
1596
1597 max_errors = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
1598 do_barriers = !btrfs_test_opt(root, NOBARRIER);
1599
1600 sb = &root->fs_info->super_for_commit;
1601 dev_item = &sb->dev_item;
1602 list_for_each(cur, head) {
1603 dev = list_entry(cur, struct btrfs_device, dev_list);
1604 if (!dev->bdev) {
1605 total_errors++;
1606 continue;
1607 }
1608 if (!dev->in_fs_metadata)
1609 continue;
1610
1611 btrfs_set_stack_device_type(dev_item, dev->type);
1612 btrfs_set_stack_device_id(dev_item, dev->devid);
1613 btrfs_set_stack_device_total_bytes(dev_item, dev->total_bytes);
1614 btrfs_set_stack_device_bytes_used(dev_item, dev->bytes_used);
1615 btrfs_set_stack_device_io_align(dev_item, dev->io_align);
1616 btrfs_set_stack_device_io_width(dev_item, dev->io_width);
1617 btrfs_set_stack_device_sector_size(dev_item, dev->sector_size);
1618 memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE);
1619 flags = btrfs_super_flags(sb);
1620 btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN);
1621
1622
1623 crc = ~(u32)0;
1624 crc = btrfs_csum_data(root, (char *)sb + BTRFS_CSUM_SIZE, crc,
1625 BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE);
1626 btrfs_csum_final(crc, sb->csum);
1627
1628 bh = __getblk(dev->bdev, BTRFS_SUPER_INFO_OFFSET / 4096,
1629 BTRFS_SUPER_INFO_SIZE);
1630
1631 memcpy(bh->b_data, sb, BTRFS_SUPER_INFO_SIZE);
1632 dev->pending_io = bh;
1633
1634 get_bh(bh);
1635 set_buffer_uptodate(bh);
1636 lock_buffer(bh);
1637 bh->b_end_io = btrfs_end_buffer_write_sync;
1638
1639 if (do_barriers && dev->barriers) {
1640 ret = submit_bh(WRITE_BARRIER, bh);
1641 if (ret == -EOPNOTSUPP) {
1642 printk("btrfs: disabling barriers on dev %s\n",
1643 dev->name);
1644 set_buffer_uptodate(bh);
1645 dev->barriers = 0;
1646 get_bh(bh);
1647 lock_buffer(bh);
1648 ret = submit_bh(WRITE, bh);
1649 }
1650 } else {
1651 ret = submit_bh(WRITE, bh);
1652 }
1653 if (ret)
1654 total_errors++;
1655 }
1656 if (total_errors > max_errors) {
1657 printk("btrfs: %d errors while writing supers\n", total_errors);
1658 BUG();
1659 }
1660 total_errors = 0;
1661
1662 list_for_each(cur, head) {
1663 dev = list_entry(cur, struct btrfs_device, dev_list);
1664 if (!dev->bdev)
1665 continue;
1666 if (!dev->in_fs_metadata)
1667 continue;
1668
1669 BUG_ON(!dev->pending_io);
1670 bh = dev->pending_io;
1671 wait_on_buffer(bh);
1672 if (!buffer_uptodate(dev->pending_io)) {
1673 if (do_barriers && dev->barriers) {
1674 printk("btrfs: disabling barriers on dev %s\n",
1675 dev->name);
1676 set_buffer_uptodate(bh);
1677 get_bh(bh);
1678 lock_buffer(bh);
1679 dev->barriers = 0;
1680 ret = submit_bh(WRITE, bh);
1681 BUG_ON(ret);
1682 wait_on_buffer(bh);
1683 if (!buffer_uptodate(bh))
1684 total_errors++;
1685 } else {
1686 total_errors++;
1687 }
1688
1689 }
1690 dev->pending_io = NULL;
1691 brelse(bh);
1692 }
1693 if (total_errors > max_errors) {
1694 printk("btrfs: %d errors while writing supers\n", total_errors);
1695 BUG();
1696 }
1697 return 0;
1698 }
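/*
 * [Editor's sketch] The redundancy policy of write_all_supers() above:
 * with max_errors set to num_devices - 1, the commit only aborts when
 * every super-block write fails; a single surviving copy is treated as
 * sufficient. super_writes_ok() is an illustrative helper.
 */
static int super_writes_ok(int num_devices, int write_errors)
{
	int max_errors = num_devices - 1;	/* tolerate all but one failing */

	return write_errors <= max_errors;
}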
1699
1700 int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
1701 *root)
1702 {
1703 int ret;
1704
1705 ret = write_all_supers(root);
1706 return ret;
1707 }
1708
1709 int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
1710 {
1711 radix_tree_delete(&fs_info->fs_roots_radix,
1712 (unsigned long)root->root_key.objectid);
1713 if (root->in_sysfs)
1714 btrfs_sysfs_del_root(root);
1715 if (root->inode)
1716 iput(root->inode);
1717 if (root->node)
1718 free_extent_buffer(root->node);
1719 if (root->commit_root)
1720 free_extent_buffer(root->commit_root);
1721 if (root->name)
1722 kfree(root->name);
1723 kfree(root);
1724 return 0;
1725 }
1726
1727 static int del_fs_roots(struct btrfs_fs_info *fs_info)
1728 {
1729 int ret;
1730 struct btrfs_root *gang[8];
1731 int i;
1732
1733 while (1) {
1734 ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
1735 (void **)gang, 0,
1736 ARRAY_SIZE(gang));
1737 if (!ret)
1738 break;
1739 for (i = 0; i < ret; i++)
1740 btrfs_free_fs_root(fs_info, gang[i]);
1741 }
1742 return 0;
1743 }
1744
1745 int close_ctree(struct btrfs_root *root)
1746 {
1747 int ret;
1748 struct btrfs_trans_handle *trans;
1749 struct btrfs_fs_info *fs_info = root->fs_info;
1750
1751 fs_info->closing = 1;
1752 smp_mb();
1753
1754 kthread_stop(root->fs_info->transaction_kthread);
1755 kthread_stop(root->fs_info->cleaner_kthread);
1756
1757 btrfs_clean_old_snapshots(root);
1758 trans = btrfs_start_transaction(root, 1);
1759 ret = btrfs_commit_transaction(trans, root);
1760 /* run commit again to drop the original snapshot */
1761 trans = btrfs_start_transaction(root, 1);
1762 btrfs_commit_transaction(trans, root);
1763 ret = btrfs_write_and_wait_transaction(NULL, root);
1764 BUG_ON(ret);
1765
1766 write_ctree_super(NULL, root);
1767
1768 if (fs_info->delalloc_bytes) {
1769 printk("btrfs: at unmount delalloc count %Lu\n",
1770 fs_info->delalloc_bytes);
1771 }
1772 if (fs_info->total_ref_cache_size) {
1773 printk("btrfs: at umount reference cache size %Lu\n",
1774 fs_info->total_ref_cache_size);
1775 }
1776
1777 if (fs_info->extent_root->node)
1778 free_extent_buffer(fs_info->extent_root->node);
1779
1780 if (fs_info->tree_root->node)
1781 free_extent_buffer(fs_info->tree_root->node);
1782
1783 if (root->fs_info->chunk_root->node)
1784 free_extent_buffer(root->fs_info->chunk_root->node);
1785
1786 if (root->fs_info->dev_root->node)
1787 free_extent_buffer(root->fs_info->dev_root->node);
1788
1789 btrfs_free_block_groups(root->fs_info);
1790 fs_info->closing = 2;
1791 del_fs_roots(fs_info);
1792
1793 filemap_write_and_wait(fs_info->btree_inode->i_mapping);
1794
1795 truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);
1796
1797 btrfs_stop_workers(&fs_info->fixup_workers);
1798 btrfs_stop_workers(&fs_info->workers);
1799 btrfs_stop_workers(&fs_info->endio_workers);
1800 btrfs_stop_workers(&fs_info->endio_write_workers);
1801 btrfs_stop_workers(&fs_info->submit_workers);
1802
1803 iput(fs_info->btree_inode);
1804 #if 0
1805 while(!list_empty(&fs_info->hashers)) {
1806 struct btrfs_hasher *hasher;
1807 hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
1808 hashers);
1809 list_del(&hasher->hashers);
1810 crypto_free_hash(hasher->hash_tfm);
1811 kfree(hasher);
1812 }
1813 #endif
1814 btrfs_close_devices(fs_info->fs_devices);
1815 btrfs_mapping_tree_free(&fs_info->mapping_tree);
1816
1817 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1818 bdi_destroy(&fs_info->bdi);
1819 #endif
1820
1821 kfree(fs_info->extent_root);
1822 kfree(fs_info->tree_root);
1823 kfree(fs_info->chunk_root);
1824 kfree(fs_info->dev_root);
1825 return 0;
1826 }
1827
1828 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid)
1829 {
1830 int ret;
1831 struct inode *btree_inode = buf->first_page->mapping->host;
1832
1833 ret = extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
1834 if (!ret)
1835 return ret;
1836
1837 ret = verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf,
1838 parent_transid);
1839 return !ret;
1840 }
1841
1842 int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
1843 {
1844 struct inode *btree_inode = buf->first_page->mapping->host;
1845 return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
1846 buf);
1847 }
1848
1849 void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
1850 {
1851 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1852 u64 transid = btrfs_header_generation(buf);
1853 struct inode *btree_inode = root->fs_info->btree_inode;
1854
1855 WARN_ON(!btrfs_tree_locked(buf));
1856 if (transid != root->fs_info->generation) {
1857 printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
1858 (unsigned long long)buf->start,
1859 transid, root->fs_info->generation);
1860 WARN_ON(1);
1861 }
1862 set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
1863 }
1864
1865 void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
1866 {
1867 /*
1868 * looks as though older kernels can get into trouble with
1869 * this code, they end up stuck in balance_dirty_pages forever
1870 */
1871 struct extent_io_tree *tree;
1872 u64 num_dirty;
1873 u64 start = 0;
1874 unsigned long thresh = 96 * 1024 * 1024;
1875 tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
1876
1877 if (current_is_pdflush() || current->flags & PF_MEMALLOC)
1878 return;
1879
1880 num_dirty = count_range_bits(tree, &start, (u64)-1,
1881 thresh, EXTENT_DIRTY);
1882 if (num_dirty > thresh) {
1883 balance_dirty_pages_ratelimited_nr(
1884 root->fs_info->btree_inode->i_mapping, 1);
1885 }
1886 return;
1887 }
1888
1889 int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid)
1890 {
1891 struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
1892 int ret;
1893 ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
1894 if (ret == 0) {
1895 buf->flags |= EXTENT_UPTODATE;
1896 }
1897 return ret;
1898 }
1899
1900 static struct extent_io_ops btree_extent_io_ops = {
1901 .writepage_io_hook = btree_writepage_io_hook,
1902 .readpage_end_io_hook = btree_readpage_end_io_hook,
1903 .submit_bio_hook = btree_submit_bio_hook,
1904 /* note we're sharing with inode.c for the merge bio hook */
1905 .merge_bio_hook = btrfs_merge_bio_hook,
1906 };