/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/crc32c.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include <linux/radix-tree.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* for block_sync_page */
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "print-tree.h"

#if 0
static int check_tree_block(struct btrfs_root *root, struct extent_buffer *buf)
{
	if (extent_buffer_blocknr(buf) != btrfs_header_blocknr(buf)) {
		printk(KERN_CRIT "buf blocknr(buf) is %llu, header is %llu\n",
		       (unsigned long long)extent_buffer_blocknr(buf),
		       (unsigned long long)btrfs_header_blocknr(buf));
		return 1;
	}
	return 0;
}
#endif

static struct extent_io_ops btree_extent_io_ops;

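/*
 * Map metadata pages for the btree inode.  Tree blocks live directly on
 * the underlying block device, so a single dummy extent covering the
 * whole inode is cached in the extent_map tree and handed back for
 * every lookup.
 */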
struct extent_map *btree_get_extent(struct inode *inode, struct page *page,
				    size_t page_offset, u64 start, u64 len,
				    int create)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	int ret;

again:
	spin_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	spin_unlock(&em_tree->lock);
	if (em) {
		goto out;
	}
	em = alloc_extent_map(GFP_NOFS);
	if (!em) {
		em = ERR_PTR(-ENOMEM);
		goto out;
	}
	em->start = 0;
	em->len = i_size_read(inode);
	em->block_start = 0;
	em->bdev = inode->i_sb->s_bdev;

	spin_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em);
	spin_unlock(&em_tree->lock);

	if (ret == -EEXIST) {
		free_extent_map(em);
		em = NULL;
		goto again;
	} else if (ret) {
		em = ERR_PTR(ret);
	}
out:
	return em;
}

u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len)
{
	return crc32c(seed, data, len);
}

void btrfs_csum_final(u32 crc, char *result)
{
	*(__le32 *)result = ~cpu_to_le32(crc);
}

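/*
 * Compute the crc32c of a tree block, skipping the checksum field at the
 * start of the header.  With verify == 0 the result is written back into
 * the block; with verify == 1 it is compared against the stored value and
 * a mismatch is reported and returned as an error.
 */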
static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
			   int verify)
{
	char result[BTRFS_CRC32_SIZE];
	unsigned long len;
	unsigned long cur_len;
	unsigned long offset = BTRFS_CSUM_SIZE;
	char *map_token = NULL;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	u32 crc = ~(u32)0;

	len = buf->len - offset;
	while (len > 0) {
		err = map_private_extent_buffer(buf, offset, 32,
						&map_token, &kaddr,
						&map_start, &map_len, KM_USER0);
		if (err) {
			printk("failed to map extent buffer! %lu\n",
			       offset);
			return 1;
		}
		cur_len = min(len, map_len - (offset - map_start));
		crc = btrfs_csum_data(root, kaddr + offset - map_start,
				      crc, cur_len);
		len -= cur_len;
		offset += cur_len;
		unmap_extent_buffer(buf, map_token, KM_USER0);
	}
	btrfs_csum_final(crc, result);

	if (verify) {
		int from_this_trans = 0;

		if (root->fs_info->running_transaction &&
		    btrfs_header_generation(buf) ==
		    root->fs_info->running_transaction->transid)
			from_this_trans = 1;

		/* FIXME, this is not good */
		if (memcmp_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE)) {
			u32 val;
			u32 found = 0;
			memcpy(&found, result, BTRFS_CRC32_SIZE);

			read_extent_buffer(buf, &val, 0, BTRFS_CRC32_SIZE);
			WARN_ON(1);
			printk("btrfs: %s checksum verify failed on %llu "
			       "wanted %X found %X from_this_trans %d "
			       "level %d\n",
			       root->fs_info->sb->s_id,
			       buf->start, val, found, from_this_trans,
			       btrfs_header_level(buf));
			return 1;
		}
	} else {
		write_extent_buffer(buf, result, 0, BTRFS_CRC32_SIZE);
	}
	return 0;
}

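/*
 * Checksum a dirty tree block before it is written out.  The page's
 * private field carries the block length; the corresponding extent
 * buffer is looked up, sanity checked and its header checksum updated.
 */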
int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
{
	struct extent_io_tree *tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 found_start;
	int found_level;
	unsigned long len;
	struct extent_buffer *eb;
	tree = &BTRFS_I(page->mapping->host)->io_tree;

	if (page->private == EXTENT_PAGE_PRIVATE)
		goto out;
	if (!page->private)
		goto out;
	len = page->private >> 2;
	if (len == 0) {
		WARN_ON(1);
	}
	eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS);
	read_extent_buffer_pages(tree, eb, start + PAGE_CACHE_SIZE, 1,
				 btree_get_extent);
	btrfs_clear_buffer_defrag(eb);
	found_start = btrfs_header_bytenr(eb);
	if (found_start != start) {
		printk("warning: eb start incorrect %Lu buffer %Lu len %lu\n",
		       start, found_start, len);
		WARN_ON(1);
		goto err;
	}
	if (eb->first_page != page) {
		printk("bad first page %lu %lu\n", eb->first_page->index,
		       page->index);
		WARN_ON(1);
		goto err;
	}
	if (!PageUptodate(page)) {
		printk("csum not up to date page %lu\n", page->index);
		WARN_ON(1);
		goto err;
	}
	found_level = btrfs_header_level(eb);
	spin_lock(&root->fs_info->hash_lock);
	btrfs_set_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
	spin_unlock(&root->fs_info->hash_lock);
	csum_tree_block(root, eb, 0);
err:
	free_extent_buffer(eb);
out:
	return 0;
}

static int btree_writepage_io_hook(struct page *page, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;

	csum_dirty_buffer(root, page);
	return 0;
}

static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 offset;
	offset = bio->bi_sector << 9;
	if (offset == BTRFS_SUPER_INFO_OFFSET) {
		bio->bi_bdev = root->fs_info->sb->s_bdev;
		submit_bio(rw, bio);
		return 0;
	}
	return btrfs_map_bio(BTRFS_I(inode)->root, rw, bio);
}

static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_write_full_page(tree, page, btree_get_extent, wbc);
}

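/*
 * Writeback for the btree inode.  Non-synchronous writeback is dropped
 * unless enough metadata is dirty: roughly 96MB when called from pdflush,
 * 8MB otherwise, so background flushing avoids lots of tiny writes.
 */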
static int btree_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	if (wbc->sync_mode == WB_SYNC_NONE) {
		u64 num_dirty;
		u64 start = 0;
		unsigned long thresh = 96 * 1024 * 1024;

		if (wbc->for_kupdate)
			return 0;

		if (current_is_pdflush()) {
			thresh = 96 * 1024 * 1024;
		} else {
			thresh = 8 * 1024 * 1024;
		}
		num_dirty = count_range_bits(tree, &start, (u64)-1,
					     thresh, EXTENT_DIRTY);
		if (num_dirty < thresh) {
			return 0;
		}
	}
	return extent_writepages(tree, mapping, btree_get_extent, wbc);
}

int btree_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btree_get_extent);
}

static int btree_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		page_cache_release(page);
	}
	return ret;
}

static void btree_invalidatepage(struct page *page, unsigned long offset)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	extent_invalidatepage(tree, page, offset);
	btree_releasepage(page, GFP_NOFS);
}

#if 0
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
	struct buffer_head *bh;
	struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
	struct buffer_head *head;
	if (!page_has_buffers(page)) {
		create_empty_buffers(page, root->fs_info->sb->s_blocksize,
				     (1 << BH_Dirty)|(1 << BH_Uptodate));
	}
	head = page_buffers(page);
	bh = head;
	do {
		if (buffer_dirty(bh))
			csum_tree_block(root, bh, 0);
		bh = bh->b_this_page;
	} while (bh != head);
	return block_write_full_page(page, btree_get_block, wbc);
}
#endif

static struct address_space_operations btree_aops = {
	.readpage = btree_readpage,
	.writepage = btree_writepage,
	.writepages = btree_writepages,
	.releasepage = btree_releasepage,
	.invalidatepage = btree_invalidatepage,
	.sync_page = block_sync_page,
};

int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	int ret = 0;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return 0;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
				 buf, 0, 0, btree_get_extent);
	free_extent_buffer(buf);
	return ret;
}

static int close_all_devices(struct btrfs_fs_info *fs_info)
{
	struct list_head *list;
	struct list_head *next;
	struct btrfs_device *device;

	list = &fs_info->fs_devices->devices;
	list_for_each(next, list) {
		device = list_entry(next, struct btrfs_device, dev_list);
		if (device->bdev && device->bdev != fs_info->sb->s_bdev)
			close_bdev_excl(device->bdev);
		device->bdev = NULL;
	}
	return 0;
}

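/*
 * Verify a tree block's checksum at most once.  The EXTENT_CSUM bit in
 * the btree io_tree (mirrored in buf->flags) records blocks that have
 * already been checked, so repeat reads skip the crc work.
 */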
int btrfs_verify_block_csum(struct btrfs_root *root,
			    struct extent_buffer *buf)
{
	struct extent_io_tree *io_tree;
	u64 end;
	int ret;

	io_tree = &BTRFS_I(root->fs_info->btree_inode)->io_tree;
	if (buf->flags & EXTENT_CSUM)
		return 0;

	end = min_t(u64, buf->len, PAGE_CACHE_SIZE);
	end = buf->start + end - 1;
	if (test_range_bit(io_tree, buf->start, end, EXTENT_CSUM, 1)) {
		buf->flags |= EXTENT_CSUM;
		return 0;
	}

	lock_extent(io_tree, buf->start, end, GFP_NOFS);

	if (test_range_bit(io_tree, buf->start, end, EXTENT_CSUM, 1)) {
		buf->flags |= EXTENT_CSUM;
		ret = 0;
		goto out_unlock;
	}

	ret = csum_tree_block(root, buf, 1);
	set_extent_bits(io_tree, buf->start, end, EXTENT_CSUM, GFP_NOFS);
	buf->flags |= EXTENT_CSUM;

out_unlock:
	unlock_extent(io_tree, buf->start, end, GFP_NOFS);
	return ret;
}

struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
					    u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;
	eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				bytenr, blocksize, GFP_NOFS);
	return eb;
}

struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
						   u64 bytenr, u32 blocksize)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_buffer *eb;

	eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree,
				 bytenr, blocksize, NULL, GFP_NOFS);
	return eb;
}

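/*
 * Read a tree block from disk (or the page cache) and verify its
 * checksum.  Read and verify failures are not propagated here; callers
 * are expected to sanity check the returned buffer themselves.
 */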
struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
				      u32 blocksize)
{
	struct extent_buffer *buf = NULL;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct extent_io_tree *io_tree;
	int ret;

	io_tree = &BTRFS_I(btree_inode)->io_tree;

	buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
	if (!buf)
		return NULL;
	read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, buf, 0, 1,
				 btree_get_extent);

	ret = btrfs_verify_block_csum(root, buf);
	return buf;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		     struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	if (btrfs_header_generation(buf) ==
	    root->fs_info->running_transaction->transid)
		clear_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree,
					  buf);
	return 0;
}

int wait_on_tree_block_writeback(struct btrfs_root *root,
				 struct extent_buffer *buf)
{
	struct inode *btree_inode = root->fs_info->btree_inode;
	wait_on_extent_buffer_writeback(&BTRFS_I(btree_inode)->io_tree,
					buf);
	return 0;
}

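/*
 * Initialize the in-memory btrfs_root with the filesystem's block size
 * parameters and reset all of its cached state.  The root's tree node
 * is read separately by the callers.
 */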
static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
			u32 stripesize, struct btrfs_root *root,
			struct btrfs_fs_info *fs_info,
			u64 objectid)
{
	root->node = NULL;
	root->inode = NULL;
	root->commit_root = NULL;
	root->sectorsize = sectorsize;
	root->nodesize = nodesize;
	root->leafsize = leafsize;
	root->stripesize = stripesize;
	root->ref_cows = 0;
	root->track_dirty = 0;

	root->fs_info = fs_info;
	root->objectid = objectid;
	root->last_trans = 0;
	root->highest_inode = 0;
	root->last_inode_alloc = 0;
	root->name = NULL;
	root->in_sysfs = 0;

	INIT_LIST_HEAD(&root->dirty_list);
	memset(&root->root_key, 0, sizeof(root->root_key));
	memset(&root->root_item, 0, sizeof(root->root_item));
	memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
	memset(&root->root_kobj, 0, sizeof(root->root_kobj));
	init_completion(&root->kobj_unregister);
	root->defrag_running = 0;
	root->defrag_level = 0;
	root->root_key.objectid = objectid;
	return 0;
}

static int find_and_setup_root(struct btrfs_root *tree_root,
			       struct btrfs_fs_info *fs_info,
			       u64 objectid,
			       struct btrfs_root *root)
{
	int ret;
	u32 blocksize;

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, objectid);
	ret = btrfs_find_last_root(tree_root, objectid,
				   &root->root_item, &root->root_key);
	BUG_ON(ret);

	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize);
	BUG_ON(!root->node);
	return 0;
}

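/*
 * Read a subvolume root from the root tree.  A key offset of (u64)-1
 * means "latest version", resolved through btrfs_find_last_root;
 * otherwise the exact root item named by the key is searched for and
 * its tree node read in.
 */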
struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_fs_info *fs_info,
					       struct btrfs_key *location)
{
	struct btrfs_root *root;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_path *path;
	struct extent_buffer *l;
	u64 highest_inode;
	u32 blocksize;
	int ret = 0;

	root = kzalloc(sizeof(*root), GFP_NOFS);
	if (!root)
		return ERR_PTR(-ENOMEM);
	if (location->offset == (u64)-1) {
		ret = find_and_setup_root(tree_root, fs_info,
					  location->objectid, root);
		if (ret) {
			kfree(root);
			return ERR_PTR(ret);
		}
		goto insert;
	}

	__setup_root(tree_root->nodesize, tree_root->leafsize,
		     tree_root->sectorsize, tree_root->stripesize,
		     root, fs_info, location->objectid);

	path = btrfs_alloc_path();
	BUG_ON(!path);
	ret = btrfs_search_slot(NULL, tree_root, location, path, 0, 0);
	if (ret != 0) {
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}
	l = path->nodes[0];
	read_extent_buffer(l, &root->root_item,
			   btrfs_item_ptr_offset(l, path->slots[0]),
			   sizeof(root->root_item));
	memcpy(&root->root_key, location, sizeof(*location));
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_free_path(path);
	if (ret) {
		kfree(root);
		return ERR_PTR(ret);
	}
	blocksize = btrfs_level_size(root, btrfs_root_level(&root->root_item));
	root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item),
				     blocksize);
	BUG_ON(!root->node);
insert:
	root->ref_cows = 1;
	ret = btrfs_find_highest_inode(root, &highest_inode);
	if (ret == 0) {
		root->highest_inode = highest_inode;
		root->last_inode_alloc = highest_inode;
	}
	return root;
}

struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info,
					u64 root_objectid)
{
	struct btrfs_root *root;

	if (root_objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)root_objectid);
	return root;
}

struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info,
					      struct btrfs_key *location)
{
	struct btrfs_root *root;
	int ret;

	if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
		return fs_info->tree_root;
	if (location->objectid == BTRFS_EXTENT_TREE_OBJECTID)
		return fs_info->extent_root;

	root = radix_tree_lookup(&fs_info->fs_roots_radix,
				 (unsigned long)location->objectid);
	if (root)
		return root;

	root = btrfs_read_fs_root_no_radix(fs_info, location);
	if (IS_ERR(root))
		return root;
	ret = radix_tree_insert(&fs_info->fs_roots_radix,
				(unsigned long)root->root_key.objectid,
				root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}
	ret = btrfs_find_dead_roots(fs_info->tree_root,
				    root->root_key.objectid, root);
	BUG_ON(ret);

	return root;
}

struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info,
				      struct btrfs_key *location,
				      const char *name, int namelen)
{
	struct btrfs_root *root;
	int ret;

	root = btrfs_read_fs_root_no_name(fs_info, location);
	if (!root)
		return NULL;

	if (root->in_sysfs)
		return root;

	ret = btrfs_set_root_name(root, name, namelen);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root);
		return ERR_PTR(ret);
	}

	ret = btrfs_sysfs_add_root(root);
	if (ret) {
		free_extent_buffer(root->node);
		kfree(root->name);
		kfree(root);
		return ERR_PTR(ret);
	}
	root->in_sysfs = 1;
	return root;
}
#if 0
static int add_hasher(struct btrfs_fs_info *info, char *type) {
	struct btrfs_hasher *hasher;

	hasher = kmalloc(sizeof(*hasher), GFP_NOFS);
	if (!hasher)
		return -ENOMEM;
	hasher->hash_tfm = crypto_alloc_hash(type, 0, CRYPTO_ALG_ASYNC);
	if (!hasher->hash_tfm) {
		kfree(hasher);
		return -EINVAL;
	}
	spin_lock(&info->hash_lock);
	list_add(&hasher->list, &info->hashers);
	spin_unlock(&info->hash_lock);
	return 0;
}
#endif

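/*
 * Backing-dev callbacks for the multi-device case: the filesystem is
 * congested if any member device's queue is congested, and an unplug
 * request is fanned out to every device so queued bios start moving.
 */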
static int btrfs_congested_fn(void *congested_data, int bdi_bits)
{
	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
	int ret = 0;
	struct list_head *cur;
	struct btrfs_device *device;
	struct backing_dev_info *bdi;

	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi && bdi_congested(bdi, bdi_bits)) {
			ret = 1;
			break;
		}
	}
	return ret;
}

void btrfs_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
	struct list_head *cur;
	struct btrfs_device *device;
	struct btrfs_fs_info *info;

	info = (struct btrfs_fs_info *)bdi->unplug_io_data;
	list_for_each(cur, &info->fs_devices->devices) {
		device = list_entry(cur, struct btrfs_device, dev_list);
		bdi = blk_get_backing_dev_info(device->bdev);
		if (bdi->unplug_io_fn) {
			bdi->unplug_io_fn(bdi, page);
		}
	}
}

static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
{
	bdi_init(bdi);
	bdi->ra_pages = default_backing_dev_info.ra_pages * 4;
	bdi->state = 0;
	bdi->capabilities = default_backing_dev_info.capabilities;
	bdi->unplug_io_fn = btrfs_unplug_io_fn;
	bdi->unplug_io_data = info;
	bdi->congested_fn = btrfs_congested_fn;
	bdi->congested_data = info;
	return 0;
}

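/*
 * Mount-time setup: allocate the in-memory fs_info and tree roots, read
 * the super block and system chunk array, bring up the chunk, device,
 * root and extent trees, and load the block group caches.  Returns the
 * tree root on success or an ERR_PTR on failure.
 */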
struct btrfs_root *open_ctree(struct super_block *sb,
			      struct btrfs_fs_devices *fs_devices)
{
	u32 sectorsize;
	u32 nodesize;
	u32 leafsize;
	u32 blocksize;
	u32 stripesize;
	struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
						 GFP_NOFS);
	struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
					       GFP_NOFS);
	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
						GFP_NOFS);
	struct btrfs_root *chunk_root = kmalloc(sizeof(struct btrfs_root),
						GFP_NOFS);
	struct btrfs_root *dev_root = kmalloc(sizeof(struct btrfs_root),
					      GFP_NOFS);
	int ret;
	int err = -EINVAL;
	struct btrfs_super_block *disk_super;

	if (!extent_root || !tree_root || !fs_info ||
	    !chunk_root || !dev_root) {
		err = -ENOMEM;
		goto fail;
	}
	INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_NOFS);
	INIT_LIST_HEAD(&fs_info->trans_list);
	INIT_LIST_HEAD(&fs_info->dead_roots);
	INIT_LIST_HEAD(&fs_info->hashers);
	spin_lock_init(&fs_info->hash_lock);
	spin_lock_init(&fs_info->delalloc_lock);
	spin_lock_init(&fs_info->new_trans_lock);

	init_completion(&fs_info->kobj_unregister);
	sb_set_blocksize(sb, 4096);
	fs_info->tree_root = tree_root;
	fs_info->extent_root = extent_root;
	fs_info->chunk_root = chunk_root;
	fs_info->dev_root = dev_root;
	fs_info->fs_devices = fs_devices;
	INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
	INIT_LIST_HEAD(&fs_info->space_info);
	btrfs_mapping_init(&fs_info->mapping_tree);
	fs_info->sb = sb;
	fs_info->max_extent = (u64)-1;
	fs_info->max_inline = 8192 * 1024;
	setup_bdi(fs_info, &fs_info->bdi);
	fs_info->btree_inode = new_inode(sb);
	fs_info->btree_inode->i_ino = 1;
	fs_info->btree_inode->i_nlink = 1;
	fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
	fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
	fs_info->btree_inode->i_mapping->backing_dev_info = &fs_info->bdi;

	extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree,
			    fs_info->btree_inode->i_mapping,
			    GFP_NOFS);
	extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree,
			     GFP_NOFS);

	BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops;

	extent_io_tree_init(&fs_info->free_space_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->block_group_cache,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pinned_extents,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->pending_del,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	extent_io_tree_init(&fs_info->extent_ins,
			    fs_info->btree_inode->i_mapping, GFP_NOFS);
	fs_info->do_barriers = 1;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
	INIT_WORK(&fs_info->trans_work, btrfs_transaction_cleaner, fs_info);
#else
	INIT_DELAYED_WORK(&fs_info->trans_work, btrfs_transaction_cleaner);
#endif
	BTRFS_I(fs_info->btree_inode)->root = tree_root;
	memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
	       sizeof(struct btrfs_key));
	insert_inode_hash(fs_info->btree_inode);
	mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);

	mutex_init(&fs_info->trans_mutex);
	mutex_init(&fs_info->fs_mutex);

#if 0
	ret = add_hasher(fs_info, "crc32c");
	if (ret) {
		printk("btrfs: failed hash setup, modprobe cryptomgr?\n");
		err = -ENOMEM;
		goto fail_iput;
	}
#endif
	__setup_root(4096, 4096, 4096, 4096, tree_root,
		     fs_info, BTRFS_ROOT_TREE_OBJECTID);

	fs_info->sb_buffer = read_tree_block(tree_root,
					     BTRFS_SUPER_INFO_OFFSET,
					     4096);

	if (!fs_info->sb_buffer)
		goto fail_iput;

	read_extent_buffer(fs_info->sb_buffer, &fs_info->super_copy, 0,
			   sizeof(fs_info->super_copy));

	read_extent_buffer(fs_info->sb_buffer, fs_info->fsid,
			   (unsigned long)btrfs_super_fsid(fs_info->sb_buffer),
			   BTRFS_FSID_SIZE);

	disk_super = &fs_info->super_copy;
	if (!btrfs_super_root(disk_super))
		goto fail_sb_buffer;

	if (btrfs_super_num_devices(disk_super) != fs_devices->num_devices) {
		printk("Btrfs: wanted %llu devices, but found %llu\n",
		       (unsigned long long)btrfs_super_num_devices(disk_super),
		       (unsigned long long)fs_devices->num_devices);
		goto fail_sb_buffer;
	}
	nodesize = btrfs_super_nodesize(disk_super);
	leafsize = btrfs_super_leafsize(disk_super);
	sectorsize = btrfs_super_sectorsize(disk_super);
	stripesize = btrfs_super_stripesize(disk_super);
	tree_root->nodesize = nodesize;
	tree_root->leafsize = leafsize;
	tree_root->sectorsize = sectorsize;
	tree_root->stripesize = stripesize;
	sb_set_blocksize(sb, sectorsize);

	i_size_write(fs_info->btree_inode,
		     btrfs_super_total_bytes(disk_super));

	if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
		    sizeof(disk_super->magic))) {
		printk("btrfs: valid FS not found on %s\n", sb->s_id);
		goto fail_sb_buffer;
	}

	mutex_lock(&fs_info->fs_mutex);

	ret = btrfs_read_sys_array(tree_root);
	BUG_ON(ret);

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_chunk_root_level(disk_super));

	__setup_root(nodesize, leafsize, sectorsize, stripesize,
		     chunk_root, fs_info, BTRFS_CHUNK_TREE_OBJECTID);

	chunk_root->node = read_tree_block(chunk_root,
					   btrfs_super_chunk_root(disk_super),
					   blocksize);
	BUG_ON(!chunk_root->node);

	ret = btrfs_read_chunk_tree(chunk_root);
	BUG_ON(ret);

	blocksize = btrfs_level_size(tree_root,
				     btrfs_super_root_level(disk_super));

	tree_root->node = read_tree_block(tree_root,
					  btrfs_super_root(disk_super),
					  blocksize);
	if (!tree_root->node)
		goto fail_sb_buffer;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
	if (ret)
		goto fail_tree_root;
	extent_root->track_dirty = 1;

	ret = find_and_setup_root(tree_root, fs_info,
				  BTRFS_DEV_TREE_OBJECTID, dev_root);
	dev_root->track_dirty = 1;

	if (ret)
		goto fail_extent_root;

	btrfs_read_block_groups(extent_root);

	fs_info->generation = btrfs_super_generation(disk_super) + 1;
	if (btrfs_super_num_devices(disk_super) > 0) {
		fs_info->data_alloc_profile = BTRFS_BLOCK_GROUP_RAID0 |
					      BTRFS_BLOCK_GROUP_RAID1;
		fs_info->metadata_alloc_profile = BTRFS_BLOCK_GROUP_RAID1 |
						  BTRFS_BLOCK_GROUP_DUP;
		fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
	}
	mutex_unlock(&fs_info->fs_mutex);
	return tree_root;

fail_extent_root:
	free_extent_buffer(extent_root->node);
fail_tree_root:
	mutex_unlock(&fs_info->fs_mutex);
	free_extent_buffer(tree_root->node);
fail_sb_buffer:
	free_extent_buffer(fs_info->sb_buffer);
	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);
fail_iput:
	iput(fs_info->btree_inode);
fail:
	close_all_devices(fs_info);
	kfree(extent_root);
	kfree(tree_root);
	bdi_destroy(&fs_info->bdi);
	kfree(fs_info);
	return ERR_PTR(err);
}

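/*
 * Write the super block back to disk.  The extent buffer holding the
 * super is marked dirty and synced, with a block device flush issued on
 * either side unless the NOBARRIER mount option is set.
 */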
int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root)
{
	int ret;
	struct extent_buffer *super = root->fs_info->sb_buffer;
	struct inode *btree_inode = root->fs_info->btree_inode;
	struct super_block *sb = root->fs_info->sb;

	if (!btrfs_test_opt(root, NOBARRIER))
		blkdev_issue_flush(sb->s_bdev, NULL);
	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, super);
	ret = sync_page_range_nolock(btree_inode, btree_inode->i_mapping,
				     super->start, super->len);
	if (!btrfs_test_opt(root, NOBARRIER))
		blkdev_issue_flush(sb->s_bdev, NULL);
	return ret;
}

int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	radix_tree_delete(&fs_info->fs_roots_radix,
			  (unsigned long)root->root_key.objectid);
	if (root->in_sysfs)
		btrfs_sysfs_del_root(root);
	if (root->inode)
		iput(root->inode);
	if (root->node)
		free_extent_buffer(root->node);
	if (root->commit_root)
		free_extent_buffer(root->commit_root);
	if (root->name)
		kfree(root->name);
	kfree(root);
	return 0;
}

static int del_fs_roots(struct btrfs_fs_info *fs_info)
{
	int ret;
	struct btrfs_root *gang[8];
	int i;

	while (1) {
		ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
					     (void **)gang, 0,
					     ARRAY_SIZE(gang));
		if (!ret)
			break;
		for (i = 0; i < ret; i++)
			btrfs_free_fs_root(fs_info, gang[i]);
	}
	return 0;
}

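/*
 * Unmount path: commit the final transactions, write the super block,
 * drop every cached tree root and block group, flush and truncate the
 * btree inode, then release the devices and per-mount allocations.
 */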
int close_ctree(struct btrfs_root *root)
{
	int ret;
	struct btrfs_trans_handle *trans;
	struct btrfs_fs_info *fs_info = root->fs_info;

	fs_info->closing = 1;
	btrfs_transaction_flush_work(root);
	mutex_lock(&fs_info->fs_mutex);
	btrfs_defrag_dirty_roots(root->fs_info);
	trans = btrfs_start_transaction(root, 1);
	ret = btrfs_commit_transaction(trans, root);
	/* run commit again to drop the original snapshot */
	trans = btrfs_start_transaction(root, 1);
	btrfs_commit_transaction(trans, root);
	ret = btrfs_write_and_wait_transaction(NULL, root);
	BUG_ON(ret);
	write_ctree_super(NULL, root);
	mutex_unlock(&fs_info->fs_mutex);

	if (fs_info->delalloc_bytes) {
		printk("btrfs: at unmount delalloc count %Lu\n",
		       fs_info->delalloc_bytes);
	}
	if (fs_info->extent_root->node)
		free_extent_buffer(fs_info->extent_root->node);

	if (fs_info->tree_root->node)
		free_extent_buffer(fs_info->tree_root->node);

	if (root->fs_info->chunk_root->node)
		free_extent_buffer(root->fs_info->chunk_root->node);

	if (root->fs_info->dev_root->node)
		free_extent_buffer(root->fs_info->dev_root->node);

	free_extent_buffer(fs_info->sb_buffer);

	btrfs_free_block_groups(root->fs_info);
	del_fs_roots(fs_info);

	filemap_write_and_wait(fs_info->btree_inode->i_mapping);

	extent_io_tree_empty_lru(&fs_info->free_space_cache);
	extent_io_tree_empty_lru(&fs_info->block_group_cache);
	extent_io_tree_empty_lru(&fs_info->pinned_extents);
	extent_io_tree_empty_lru(&fs_info->pending_del);
	extent_io_tree_empty_lru(&fs_info->extent_ins);
	extent_io_tree_empty_lru(&BTRFS_I(fs_info->btree_inode)->io_tree);

	truncate_inode_pages(fs_info->btree_inode->i_mapping, 0);

	iput(fs_info->btree_inode);
#if 0
	while (!list_empty(&fs_info->hashers)) {
		struct btrfs_hasher *hasher;
		hasher = list_entry(fs_info->hashers.next, struct btrfs_hasher,
				    hashers);
		list_del(&hasher->hashers);
		crypto_free_hash(&fs_info->hash_tfm);
		kfree(hasher);
	}
#endif
	close_all_devices(fs_info);
	btrfs_mapping_tree_free(&fs_info->mapping_tree);
	bdi_destroy(&fs_info->bdi);

	kfree(fs_info->extent_root);
	kfree(fs_info->tree_root);
	kfree(fs_info->chunk_root);
	kfree(fs_info->dev_root);
	return 0;
}

int btrfs_buffer_uptodate(struct extent_buffer *buf)
{
	struct inode *btree_inode = buf->first_page->mapping->host;
	return extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree, buf);
}

int btrfs_set_buffer_uptodate(struct extent_buffer *buf)
{
	struct inode *btree_inode = buf->first_page->mapping->host;
	return set_extent_buffer_uptodate(&BTRFS_I(btree_inode)->io_tree,
					  buf);
}

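/*
 * Mark a tree block dirty in the btree io_tree.  A buffer whose header
 * generation does not match the filesystem's current generation is being
 * modified outside the running transaction, so a warning is printed.
 */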
void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	u64 transid = btrfs_header_generation(buf);
	struct inode *btree_inode = root->fs_info->btree_inode;

	if (transid != root->fs_info->generation) {
		printk(KERN_CRIT "transid mismatch buffer %llu, found %Lu running %Lu\n",
		       (unsigned long long)buf->start,
		       transid, root->fs_info->generation);
		WARN_ON(1);
	}
	set_extent_buffer_dirty(&BTRFS_I(btree_inode)->io_tree, buf);
}

void btrfs_throttle(struct btrfs_root *root)
{
	struct backing_dev_info *bdi;

	bdi = root->fs_info->sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
	if (root->fs_info->throttles && bdi_write_congested(bdi)) {
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
		congestion_wait(WRITE, HZ/20);
#else
		blk_congestion_wait(WRITE, HZ/20);
#endif
	}
}

void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
{
	balance_dirty_pages_ratelimited_nr(
			root->fs_info->btree_inode->i_mapping, 1);
}

void btrfs_set_buffer_defrag(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
			buf->start + buf->len - 1, EXTENT_DEFRAG, GFP_NOFS);
}

void btrfs_set_buffer_defrag_done(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	set_extent_bits(&BTRFS_I(btree_inode)->io_tree, buf->start,
			buf->start + buf->len - 1, EXTENT_DEFRAG_DONE,
			GFP_NOFS);
}

int btrfs_buffer_defrag(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
		     buf->start, buf->start + buf->len - 1, EXTENT_DEFRAG, 0);
}

int btrfs_buffer_defrag_done(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	return test_range_bit(&BTRFS_I(btree_inode)->io_tree,
			      buf->start, buf->start + buf->len - 1,
			      EXTENT_DEFRAG_DONE, 0);
}

int btrfs_clear_buffer_defrag_done(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
				 buf->start, buf->start + buf->len - 1,
				 EXTENT_DEFRAG_DONE, GFP_NOFS);
}

int btrfs_clear_buffer_defrag(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	return clear_extent_bits(&BTRFS_I(btree_inode)->io_tree,
				 buf->start, buf->start + buf->len - 1,
				 EXTENT_DEFRAG, GFP_NOFS);
}

int btrfs_read_buffer(struct extent_buffer *buf)
{
	struct btrfs_root *root = BTRFS_I(buf->first_page->mapping->host)->root;
	struct inode *btree_inode = root->fs_info->btree_inode;
	return read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
					buf, 0, 1, btree_get_extent);
}

static struct extent_io_ops btree_extent_io_ops = {
	.writepage_io_hook = btree_writepage_io_hook,
	.submit_bio_hook = btree_submit_bio_hook,
	/* note we're sharing with inode.c for the merge bio hook */
	.merge_bio_hook = btrfs_merge_bio_hook,
};