Btrfs: dirindex optimizations
fs/btrfs/disk-io.c
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/swap.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"

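/*
 * Basic sanity checks on a tree block: the block number recorded in the
 * header must match the buffer it was read from, and the parentid must
 * match the root this block is supposed to belong to.
 */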
static int check_tree_block(struct btrfs_root *root, struct buffer_head *buf)
{
        struct btrfs_node *node = btrfs_buffer_node(buf);
        if (buf->b_blocknr != btrfs_header_blocknr(&node->header)) {
                BUG();
        }
        if (root->node && btrfs_header_parentid(&node->header) !=
            btrfs_header_parentid(btrfs_buffer_header(root->node))) {
                BUG();
        }
        return 0;
}

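/*
 * Look up a tree block in the btree inode's page cache.  Returns the
 * buffer_head with an extra reference if it is already present, NULL
 * otherwise.  No read is started here.
 */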
struct buffer_head *btrfs_find_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;

        page = find_lock_page(mapping, index);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                goto out_unlock;

        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_mapped(bh) && bh->b_blocknr == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        if (ret) {
                touch_buffer(ret);
        }
        page_cache_release(page);
        return ret;
}

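/*
 * Like btrfs_find_tree_block(), but allocate the page and its buffer_heads
 * if they are not in the cache yet.  Buffers on the page are mapped to
 * consecutive device blocks starting at the first block covered by the page.
 */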
struct buffer_head *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                 u64 blocknr)
{
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        int blockbits = root->fs_info->sb->s_blocksize_bits;
        unsigned long index = blocknr >> (PAGE_CACHE_SHIFT - blockbits);
        struct page *page;
        struct buffer_head *bh;
        struct buffer_head *head;
        struct buffer_head *ret = NULL;
        u64 first_block = index << (PAGE_CACHE_SHIFT - blockbits);

        page = grab_cache_page(mapping, index);
        if (!page)
                return NULL;

        if (!page_has_buffers(page))
                create_empty_buffers(page, root->fs_info->sb->s_blocksize, 0);
        head = page_buffers(page);
        bh = head;
        do {
                if (!buffer_mapped(bh)) {
                        bh->b_bdev = root->fs_info->sb->s_bdev;
                        bh->b_blocknr = first_block;
                        set_buffer_mapped(bh);
                }
                if (bh->b_blocknr == blocknr) {
                        ret = bh;
                        get_bh(bh);
                        goto out_unlock;
                }
                bh = bh->b_this_page;
                first_block++;
        } while (bh != head);
out_unlock:
        unlock_page(page);
        if (ret)
                touch_buffer(ret);
        page_cache_release(page);
        return ret;
}

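/*
 * Size of the underlying block device in filesystem-sized blocks, used as an
 * exclusive upper bound on valid block numbers.
 */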
static sector_t max_block(struct block_device *bdev)
{
        sector_t retval = ~((sector_t)0);
        loff_t sz = i_size_read(bdev->bd_inode);

        if (sz) {
                unsigned int size = block_size(bdev);
                unsigned int sizebits = blksize_bits(size);
                retval = (sz >> sizebits);
        }
        return retval;
}

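/*
 * get_block callback for the btree inode: metadata blocks map 1:1 onto the
 * device, so the logical block number is used directly as the physical one.
 */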
static int btree_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int create)
{
        if (iblock >= max_block(inode->i_sb->s_bdev)) {
                if (create)
                        return -EIO;

                /*
                 * for reads, we're just trying to fill a partial page.
                 * return a hole, they will have to call get_block again
                 * before they can fill it, and they will get -EIO at that
                 * time
                 */
                return 0;
        }
        bh->b_bdev = inode->i_sb->s_bdev;
        bh->b_blocknr = iblock;
        set_buffer_mapped(bh);
        return 0;
}

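/*
 * Checksum a buffer of data with the fs-wide hash transform (sha256).  The
 * transform is shared, so it is protected by fs_info->hash_lock.
 */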
int btrfs_csum_data(struct btrfs_root *root, char *data, size_t len,
                    char *result)
{
        struct scatterlist sg;
        struct crypto_hash *tfm = root->fs_info->hash_tfm;
        struct hash_desc desc;
        int ret;

        desc.tfm = tfm;
        desc.flags = 0;
        sg_init_one(&sg, data, len);
        spin_lock(&root->fs_info->hash_lock);
        ret = crypto_hash_digest(&desc, &sg, 1, result);
        spin_unlock(&root->fs_info->hash_lock);
        if (ret) {
                printk("sha256 digest failed\n");
        }
        return ret;
}

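/*
 * Checksum everything in a tree block after the csum field itself.  With
 * verify set, compare against the checksum stored at the start of the block;
 * otherwise write the new checksum into the block header.
 */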
static int csum_tree_block(struct btrfs_root *root, struct buffer_head *bh,
                           int verify)
{
        char result[BTRFS_CSUM_SIZE];
        int ret;
        struct btrfs_node *node;

        ret = btrfs_csum_data(root, bh->b_data + BTRFS_CSUM_SIZE,
                              bh->b_size - BTRFS_CSUM_SIZE, result);
        if (ret)
                return ret;
        if (verify) {
                if (memcmp(bh->b_data, result, BTRFS_CSUM_SIZE)) {
                        printk("checksum verify failed on %lu\n",
                               bh->b_blocknr);
                        return 1;
                }
        } else {
                node = btrfs_buffer_node(bh);
                memcpy(node->header.csum, result, BTRFS_CSUM_SIZE);
        }
        return 0;
}

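/*
 * writepage for the btree inode: recompute the checksum of every dirty
 * buffer on the page before handing it to block_write_full_page().
 */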
static int btree_writepage(struct page *page, struct writeback_control *wbc)
{
        struct buffer_head *bh;
        struct btrfs_root *root = btrfs_sb(page->mapping->host->i_sb);
        struct buffer_head *head;

        if (!page_has_buffers(page)) {
                create_empty_buffers(page, root->fs_info->sb->s_blocksize,
                                     (1 << BH_Dirty) | (1 << BH_Uptodate));
        }
        head = page_buffers(page);
        bh = head;
        do {
                if (buffer_dirty(bh))
                        csum_tree_block(root, bh, 0);
                bh = bh->b_this_page;
        } while (bh != head);
        return block_write_full_page(page, btree_get_block, wbc);
}

static int btree_readpage(struct file *file, struct page *page)
{
        return block_read_full_page(page, btree_get_block);
}

static struct address_space_operations btree_aops = {
        .readpage = btree_readpage,
        .writepage = btree_writepage,
        .sync_page = block_sync_page,
};

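/*
 * Read a tree block from the cache or from disk, verify its checksum and
 * run the header sanity checks.  Returns NULL if the read fails.
 */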
struct buffer_head *read_tree_block(struct btrfs_root *root, u64 blocknr)
{
        struct buffer_head *bh = NULL;

        bh = btrfs_find_create_tree_block(root, blocknr);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                goto uptodate;
        lock_buffer(bh);
        if (!buffer_uptodate(bh)) {
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
                submit_bh(READ, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh))
                        goto fail;
                csum_tree_block(root, bh, 1);
        } else {
                unlock_buffer(bh);
        }
uptodate:
        if (check_tree_block(root, bh))
                BUG();
        return bh;
fail:
        brelse(bh);
        return NULL;
}

int dirty_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct buffer_head *buf)
{
        WARN_ON(atomic_read(&buf->b_count) == 0);
        mark_buffer_dirty(buf);
        return 0;
}

int clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                     struct buffer_head *buf)
{
        WARN_ON(atomic_read(&buf->b_count) == 0);
        clear_buffer_dirty(buf);
        return 0;
}

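/* Initialize the in-memory fields of a btrfs_root to a clean state. */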
static int __setup_root(int blocksize,
                        struct btrfs_root *root,
                        struct btrfs_fs_info *fs_info,
                        u64 objectid)
{
        root->node = NULL;
        root->commit_root = NULL;
        root->blocksize = blocksize;
        root->ref_cows = 0;
        root->fs_info = fs_info;
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        return 0;
}

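/*
 * Initialize a root, look up its most recent root item in the tree of tree
 * roots and read in its root node.
 */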
static int find_and_setup_root(int blocksize,
                               struct btrfs_root *tree_root,
                               struct btrfs_fs_info *fs_info,
                               u64 objectid,
                               struct btrfs_root *root)
{
        int ret;

        __setup_root(blocksize, root, fs_info, objectid);
        ret = btrfs_find_last_root(tree_root, objectid,
                                   &root->root_item, &root->root_key);
        BUG_ON(ret);

        root->node = read_tree_block(root,
                                     btrfs_root_blocknr(&root->root_item));
        BUG_ON(!root->node);
        return 0;
}

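/*
 * Mount-time setup: allocate the fs_info and the in-memory roots, set up the
 * btree inode and the hash transform, read the super block and then load the
 * tree root, extent tree, inode map and fs tree.
 */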
struct btrfs_root *open_ctree(struct super_block *sb)
{
        struct btrfs_root *root = kmalloc(sizeof(struct btrfs_root),
                                          GFP_NOFS);
        struct btrfs_root *extent_root = kmalloc(sizeof(struct btrfs_root),
                                                 GFP_NOFS);
        struct btrfs_root *tree_root = kmalloc(sizeof(struct btrfs_root),
                                               GFP_NOFS);
        struct btrfs_root *inode_root = kmalloc(sizeof(struct btrfs_root),
                                                GFP_NOFS);
        struct btrfs_fs_info *fs_info = kmalloc(sizeof(*fs_info),
                                                GFP_NOFS);
        int ret;
        struct btrfs_super_block *disk_super;

        init_bit_radix(&fs_info->pinned_radix);
        init_bit_radix(&fs_info->pending_del_radix);
        sb_set_blocksize(sb, 4096);
        fs_info->running_transaction = NULL;
        fs_info->fs_root = root;
        fs_info->tree_root = tree_root;
        fs_info->extent_root = extent_root;
        fs_info->inode_root = inode_root;
        fs_info->last_inode_alloc = 0;
        fs_info->highest_inode = 0;
        fs_info->sb = sb;
        fs_info->btree_inode = new_inode(sb);
        fs_info->btree_inode->i_ino = 1;
        fs_info->btree_inode->i_nlink = 1;
        fs_info->btree_inode->i_size = sb->s_bdev->bd_inode->i_size;
        fs_info->btree_inode->i_mapping->a_ops = &btree_aops;
        insert_inode_hash(fs_info->btree_inode);
        mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
        fs_info->hash_tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
        spin_lock_init(&fs_info->hash_lock);
        if (!fs_info->hash_tfm || IS_ERR(fs_info->hash_tfm)) {
                printk("failed to allocate sha256 hash\n");
                return NULL;
        }
        mutex_init(&fs_info->trans_mutex);
        mutex_init(&fs_info->fs_mutex);
        memset(&fs_info->current_insert, 0, sizeof(fs_info->current_insert));
        memset(&fs_info->last_insert, 0, sizeof(fs_info->last_insert));

        __setup_root(sb->s_blocksize, tree_root,
                     fs_info, BTRFS_ROOT_TREE_OBJECTID);
        fs_info->sb_buffer = read_tree_block(tree_root,
                                             BTRFS_SUPER_INFO_OFFSET /
                                             sb->s_blocksize);

        if (!fs_info->sb_buffer) {
                printk("failed to read the super block\n");
                return NULL;
        }
        disk_super = (struct btrfs_super_block *)fs_info->sb_buffer->b_data;
        if (!btrfs_super_root(disk_super)) {
                return NULL;
        }
        fs_info->disk_super = disk_super;
        tree_root->node = read_tree_block(tree_root,
                                          btrfs_super_root(disk_super));
        BUG_ON(!tree_root->node);

        mutex_lock(&fs_info->fs_mutex);
        ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
                                  BTRFS_EXTENT_TREE_OBJECTID, extent_root);
        BUG_ON(ret);

        ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
                                  BTRFS_INODE_MAP_OBJECTID, inode_root);
        BUG_ON(ret);

        ret = find_and_setup_root(sb->s_blocksize, tree_root, fs_info,
                                  BTRFS_FS_TREE_OBJECTID, root);
        BUG_ON(ret);
        root->commit_root = root->node;
        get_bh(root->node);
        root->ref_cows = 1;
        root->fs_info->generation = root->root_key.offset + 1;
        ret = btrfs_find_highest_inode(root, &root->fs_info->last_inode_alloc);
        if (ret == 0)
                fs_info->highest_inode = fs_info->last_inode_alloc;
        mutex_unlock(&fs_info->fs_mutex);
        return root;
}

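/*
 * Write the super block back to disk synchronously, after pointing it at
 * the current tree root and recomputing its checksum.
 */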
int write_ctree_super(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root)
{
        struct buffer_head *bh = root->fs_info->sb_buffer;

        btrfs_set_super_root(root->fs_info->disk_super,
                             root->fs_info->tree_root->node->b_blocknr);
        lock_buffer(bh);
        WARN_ON(atomic_read(&bh->b_count) < 1);
        clear_buffer_dirty(bh);
        csum_tree_block(root, bh, 0);
        bh->b_end_io = end_buffer_write_sync;
        get_bh(bh);
        submit_bh(WRITE, bh);
        wait_on_buffer(bh);
        if (!buffer_uptodate(bh)) {
                WARN_ON(1);
                return -EIO;
        }
        return 0;
}

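/*
 * Unmount-time teardown: commit the final transactions, write the super
 * block, then release every cached root node and free the allocations made
 * in open_ctree().
 */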
int close_ctree(struct btrfs_root *root)
{
        int ret;
        struct btrfs_trans_handle *trans;

        mutex_lock(&root->fs_info->fs_mutex);
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        /* run commit again to drop the original snapshot */
        trans = btrfs_start_transaction(root, 1);
        btrfs_commit_transaction(trans, root);
        ret = btrfs_write_and_wait_transaction(NULL, root);
        BUG_ON(ret);
        write_ctree_super(NULL, root);
        mutex_unlock(&root->fs_info->fs_mutex);

        if (root->node)
                btrfs_block_release(root, root->node);
        if (root->fs_info->extent_root->node)
                btrfs_block_release(root->fs_info->extent_root,
                                    root->fs_info->extent_root->node);
        if (root->fs_info->inode_root->node)
                btrfs_block_release(root->fs_info->inode_root,
                                    root->fs_info->inode_root->node);
        if (root->fs_info->tree_root->node)
                btrfs_block_release(root->fs_info->tree_root,
                                    root->fs_info->tree_root->node);
        btrfs_block_release(root, root->commit_root);
        btrfs_block_release(root, root->fs_info->sb_buffer);
        crypto_free_hash(root->fs_info->hash_tfm);
        truncate_inode_pages(root->fs_info->btree_inode->i_mapping, 0);
        iput(root->fs_info->btree_inode);
        kfree(root->fs_info->extent_root);
        kfree(root->fs_info->inode_root);
        kfree(root->fs_info->tree_root);
        kfree(root->fs_info);
        kfree(root);
        return 0;
}

void btrfs_block_release(struct btrfs_root *root, struct buffer_head *buf)
{
        brelse(buf);
}