Btrfs: early metadata/data split
fs/btrfs/super.c
1 #include <linux/module.h>
2 #include <linux/buffer_head.h>
3 #include <linux/fs.h>
4 #include <linux/pagemap.h>
5 #include <linux/highmem.h>
6 #include <linux/time.h>
7 #include <linux/init.h>
8 #include <linux/string.h>
9 #include <linux/smp_lock.h>
10 #include <linux/backing-dev.h>
11 #include <linux/mpage.h>
12 #include <linux/swap.h>
13 #include <linux/writeback.h>
14 #include <linux/statfs.h>
15 #include "ctree.h"
16 #include "disk-io.h"
17 #include "transaction.h"
18 #include "btrfs_inode.h"
19 #include "ioctl.h"
20
21 static void btrfs_fsinfo_release(struct kobject *obj)
22 {
23 struct btrfs_fs_info *fsinfo = container_of(obj,
24 struct btrfs_fs_info, kobj);
25 kfree(fsinfo);
26 }
27
28 static struct kobj_type btrfs_fsinfo_ktype = {
29 .release = btrfs_fsinfo_release,
30 };
31
32 struct btrfs_iget_args {
33 u64 ino;
34 struct btrfs_root *root;
35 };
36
37 decl_subsys(btrfs, &btrfs_fsinfo_ktype, NULL);
38
39 #define BTRFS_SUPER_MAGIC 0x9123682E
40
41 static struct inode_operations btrfs_dir_inode_operations;
42 static struct inode_operations btrfs_dir_ro_inode_operations;
43 static struct super_operations btrfs_super_ops;
44 static struct file_operations btrfs_dir_file_operations;
45 static struct inode_operations btrfs_file_inode_operations;
46 static struct address_space_operations btrfs_aops;
47 static struct file_operations btrfs_file_operations;
48
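/*
 * Read the inode item named by BTRFS_I(inode)->location and fill in the
 * VFS inode: ownership, size, timestamps, block count, generation and the
 * block group allocation hint.  The inode, file and address space
 * operations are then chosen from the file type; directories in the tree
 * root get the read-only variant.  A failed lookup marks the inode bad.
 */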
49 static void btrfs_read_locked_inode(struct inode *inode)
50 {
51 struct btrfs_path *path;
52 struct btrfs_inode_item *inode_item;
53 struct btrfs_root *root = BTRFS_I(inode)->root;
54 struct btrfs_key location;
55 struct btrfs_block_group_cache *alloc_group;
56 u64 alloc_group_block;
57 int ret;
58
59 path = btrfs_alloc_path();
60 BUG_ON(!path);
61 btrfs_init_path(path);
62 mutex_lock(&root->fs_info->fs_mutex);
63
64 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
65 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
66 if (ret) {
67 btrfs_free_path(path);
68 goto make_bad;
69 }
70 inode_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
71 path->slots[0],
72 struct btrfs_inode_item);
73
74 inode->i_mode = btrfs_inode_mode(inode_item);
75 inode->i_nlink = btrfs_inode_nlink(inode_item);
76 inode->i_uid = btrfs_inode_uid(inode_item);
77 inode->i_gid = btrfs_inode_gid(inode_item);
78 inode->i_size = btrfs_inode_size(inode_item);
79 inode->i_atime.tv_sec = btrfs_timespec_sec(&inode_item->atime);
80 inode->i_atime.tv_nsec = btrfs_timespec_nsec(&inode_item->atime);
81 inode->i_mtime.tv_sec = btrfs_timespec_sec(&inode_item->mtime);
82 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(&inode_item->mtime);
83 inode->i_ctime.tv_sec = btrfs_timespec_sec(&inode_item->ctime);
84 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(&inode_item->ctime);
85 inode->i_blocks = btrfs_inode_nblocks(inode_item);
86 inode->i_generation = btrfs_inode_generation(inode_item);
87 alloc_group_block = btrfs_inode_block_group(inode_item);
88 ret = radix_tree_gang_lookup(&root->fs_info->block_group_radix,
89 (void **)&alloc_group,
90 alloc_group_block, 1);
91 BUG_ON(!ret);
92 BTRFS_I(inode)->block_group = alloc_group;
93
94 btrfs_free_path(path);
95 inode_item = NULL;
96
97 mutex_unlock(&root->fs_info->fs_mutex);
98
99 switch (inode->i_mode & S_IFMT) {
100 #if 0
101 default:
102 init_special_inode(inode, inode->i_mode,
103 btrfs_inode_rdev(inode_item));
104 break;
105 #endif
106 case S_IFREG:
107 inode->i_mapping->a_ops = &btrfs_aops;
108 inode->i_fop = &btrfs_file_operations;
109 inode->i_op = &btrfs_file_inode_operations;
110 break;
111 case S_IFDIR:
112 inode->i_fop = &btrfs_dir_file_operations;
113 if (root == root->fs_info->tree_root)
114 inode->i_op = &btrfs_dir_ro_inode_operations;
115 else
116 inode->i_op = &btrfs_dir_inode_operations;
117 break;
118 case S_IFLNK:
119 /* inode->i_op = &page_symlink_inode_operations; */
120 break;
121 }
122 return;
123
124 make_bad:
125 btrfs_release_path(root, path);
126 btrfs_free_path(path);
127 mutex_unlock(&root->fs_info->fs_mutex);
128 make_bad_inode(inode);
129 }
130
131 static void fill_inode_item(struct btrfs_inode_item *item,
132 struct inode *inode)
133 {
134 btrfs_set_inode_uid(item, inode->i_uid);
135 btrfs_set_inode_gid(item, inode->i_gid);
136 btrfs_set_inode_size(item, inode->i_size);
137 btrfs_set_inode_mode(item, inode->i_mode);
138 btrfs_set_inode_nlink(item, inode->i_nlink);
139 btrfs_set_timespec_sec(&item->atime, inode->i_atime.tv_sec);
140 btrfs_set_timespec_nsec(&item->atime, inode->i_atime.tv_nsec);
141 btrfs_set_timespec_sec(&item->mtime, inode->i_mtime.tv_sec);
142 btrfs_set_timespec_nsec(&item->mtime, inode->i_mtime.tv_nsec);
143 btrfs_set_timespec_sec(&item->ctime, inode->i_ctime.tv_sec);
144 btrfs_set_timespec_nsec(&item->ctime, inode->i_ctime.tv_nsec);
145 btrfs_set_inode_nblocks(item, inode->i_blocks);
146 btrfs_set_inode_generation(item, inode->i_generation);
147 btrfs_set_inode_block_group(item,
148 BTRFS_I(inode)->block_group->key.objectid);
149 }
150
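/*
 * Copy the in-memory inode back into its inode item in the tree.  The
 * caller must already have a transaction running; returns -ENOENT if the
 * item cannot be found.
 */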
151 static int btrfs_update_inode(struct btrfs_trans_handle *trans,
152 struct btrfs_root *root,
153 struct inode *inode)
154 {
155 struct btrfs_inode_item *inode_item;
156 struct btrfs_path *path;
157 int ret;
158
159 path = btrfs_alloc_path();
160 BUG_ON(!path);
161 btrfs_init_path(path);
162 ret = btrfs_lookup_inode(trans, root, path,
163 &BTRFS_I(inode)->location, 1);
164 if (ret) {
165 if (ret > 0)
166 ret = -ENOENT;
167 goto failed;
168 }
169
170 inode_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
171 path->slots[0],
172 struct btrfs_inode_item);
173
174 fill_inode_item(inode_item, inode);
175 btrfs_mark_buffer_dirty(path->nodes[0]);
176 ret = 0;
177 failed:
178 btrfs_release_path(root, path);
179 btrfs_free_path(path);
180 return ret;
181 }
182
183
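/*
 * Remove a directory entry inside an existing transaction: delete the dir
 * item and the matching dir index item, shrink the parent directory's
 * i_size and drop the victim inode's link count.
 */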
184 static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
185 struct btrfs_root *root,
186 struct inode *dir,
187 struct dentry *dentry)
188 {
189 struct btrfs_path *path;
190 const char *name = dentry->d_name.name;
191 int name_len = dentry->d_name.len;
192 int ret = 0;
193 u64 objectid;
194 struct btrfs_dir_item *di;
195
196 path = btrfs_alloc_path();
197 BUG_ON(!path);
198 btrfs_init_path(path);
199 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
200 name, name_len, -1);
201 if (IS_ERR(di)) {
202 ret = PTR_ERR(di);
203 goto err;
204 }
205 if (!di) {
206 ret = -ENOENT;
207 goto err;
208 }
209 objectid = btrfs_disk_key_objectid(&di->location);
210 ret = btrfs_delete_one_dir_name(trans, root, path, di);
211 BUG_ON(ret);
212 btrfs_release_path(root, path);
213
214 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
215 objectid, name, name_len, -1);
216 if (IS_ERR(di)) {
217 ret = PTR_ERR(di);
218 goto err;
219 }
220 if (!di) {
221 ret = -ENOENT;
222 goto err;
223 }
224 ret = btrfs_delete_one_dir_name(trans, root, path, di);
225 BUG_ON(ret);
226
227 dentry->d_inode->i_ctime = dir->i_ctime;
228 err:
229 btrfs_free_path(path);
230 if (!ret) {
231 dir->i_size -= name_len * 2;
232 btrfs_update_inode(trans, root, dir);
233 drop_nlink(dentry->d_inode);
234 btrfs_update_inode(trans, root, dentry->d_inode);
235 dir->i_sb->s_dirt = 1;
236 }
237 return ret;
238 }
239
240 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
241 {
242 struct btrfs_root *root;
243 struct btrfs_trans_handle *trans;
244 int ret;
245
246 root = BTRFS_I(dir)->root;
247 mutex_lock(&root->fs_info->fs_mutex);
248 trans = btrfs_start_transaction(root, 1);
249 btrfs_set_trans_block_group(trans, dir);
250 ret = btrfs_unlink_trans(trans, root, dir, dentry);
251 btrfs_end_transaction(trans, root);
252 mutex_unlock(&root->fs_info->fs_mutex);
253 btrfs_btree_balance_dirty(root);
254 return ret;
255 }
256
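/*
 * rmdir: walk the victim directory's items from the end of its key range,
 * deleting entries as long as only "." and ".." are found; anything else
 * fails with -ENOTEMPTY.  Once the directory is empty it is unlinked from
 * its parent like a regular file.
 */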
257 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
258 {
259 struct inode *inode = dentry->d_inode;
260 int err;
261 int ret;
262 struct btrfs_root *root = BTRFS_I(dir)->root;
263 struct btrfs_path *path;
264 struct btrfs_key key;
265 struct btrfs_trans_handle *trans;
266 struct btrfs_key found_key;
267 int found_type;
268 struct btrfs_leaf *leaf;
269 char *goodnames = "..";
270
271 path = btrfs_alloc_path();
272 BUG_ON(!path);
273 btrfs_init_path(path);
274 mutex_lock(&root->fs_info->fs_mutex);
275 trans = btrfs_start_transaction(root, 1);
276 btrfs_set_trans_block_group(trans, dir);
277 key.objectid = inode->i_ino;
278 key.offset = (u64)-1;
279 key.flags = (u32)-1;
280 while(1) {
281 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
282 if (ret < 0) {
283 err = ret;
284 goto out;
285 }
286 BUG_ON(ret == 0);
287 if (path->slots[0] == 0) {
288 err = -ENOENT;
289 goto out;
290 }
291 path->slots[0]--;
292 leaf = btrfs_buffer_leaf(path->nodes[0]);
293 btrfs_disk_key_to_cpu(&found_key,
294 &leaf->items[path->slots[0]].key);
295 found_type = btrfs_key_type(&found_key);
296 if (found_key.objectid != inode->i_ino) {
297 err = -ENOENT;
298 goto out;
299 }
300 if ((found_type != BTRFS_DIR_ITEM_KEY &&
301 found_type != BTRFS_DIR_INDEX_KEY) ||
302 (!btrfs_match_dir_item_name(root, path, goodnames, 2) &&
303 !btrfs_match_dir_item_name(root, path, goodnames, 1))) {
304 err = -ENOTEMPTY;
305 goto out;
306 }
307 ret = btrfs_del_item(trans, root, path);
308 BUG_ON(ret);
309
310 if (found_type == BTRFS_DIR_ITEM_KEY && found_key.offset == 1)
311 break;
312 btrfs_release_path(root, path);
313 }
314 ret = 0;
315 btrfs_release_path(root, path);
316
317 /* now the directory is empty */
318 err = btrfs_unlink_trans(trans, root, dir, dentry);
319 if (!err) {
320 inode->i_size = 0;
321 }
322 out:
323 btrfs_release_path(root, path);
324 btrfs_free_path(path);
325 mutex_unlock(&root->fs_info->fs_mutex);
326 ret = btrfs_end_transaction(trans, root);
327 btrfs_btree_balance_dirty(root);
328 if (ret && !err)
329 err = ret;
330 return err;
331 }
332
333 static int btrfs_free_inode(struct btrfs_trans_handle *trans,
334 struct btrfs_root *root,
335 struct inode *inode)
336 {
337 struct btrfs_path *path;
338 int ret;
339
340 clear_inode(inode);
341
342 path = btrfs_alloc_path();
343 BUG_ON(!path);
344 btrfs_init_path(path);
345 ret = btrfs_lookup_inode(trans, root, path,
346 &BTRFS_I(inode)->location, -1);
347 BUG_ON(ret);
348 ret = btrfs_del_item(trans, root, path);
349 BUG_ON(ret);
350 btrfs_free_path(path);
351 return ret;
352 }
353
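/*
 * Delete csum and extent data items at or beyond the current i_size,
 * freeing the disk extents they reference and adjusting i_blocks as each
 * one goes away.
 */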
354 static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
355 struct btrfs_root *root,
356 struct inode *inode)
357 {
358 int ret;
359 struct btrfs_path *path;
360 struct btrfs_key key;
361 struct btrfs_disk_key *found_key;
362 struct btrfs_leaf *leaf;
363 struct btrfs_file_extent_item *fi = NULL;
364 u64 extent_start = 0;
365 u64 extent_num_blocks = 0;
366 int found_extent;
367
368 path = btrfs_alloc_path();
369 BUG_ON(!path);
370 /* FIXME, add redo link to tree so we don't leak on crash */
371 key.objectid = inode->i_ino;
372 key.offset = (u64)-1;
373 key.flags = 0;
374 /*
375 * use BTRFS_CSUM_ITEM_KEY because it is larger than inline keys
376 * or extent data
377 */
378 btrfs_set_key_type(&key, BTRFS_CSUM_ITEM_KEY);
379 while(1) {
380 btrfs_init_path(path);
381 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
382 if (ret < 0) {
383 goto error;
384 }
385 if (ret > 0) {
386 BUG_ON(path->slots[0] == 0);
387 path->slots[0]--;
388 }
389 leaf = btrfs_buffer_leaf(path->nodes[0]);
390 found_key = &leaf->items[path->slots[0]].key;
391 if (btrfs_disk_key_objectid(found_key) != inode->i_ino)
392 break;
393 if (btrfs_disk_key_type(found_key) != BTRFS_CSUM_ITEM_KEY &&
394 btrfs_disk_key_type(found_key) != BTRFS_EXTENT_DATA_KEY)
395 break;
396 if (btrfs_disk_key_offset(found_key) < inode->i_size)
397 break;
398 found_extent = 0;
399 if (btrfs_disk_key_type(found_key) == BTRFS_EXTENT_DATA_KEY) {
400 fi = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
401 path->slots[0],
402 struct btrfs_file_extent_item);
403 if (btrfs_file_extent_type(fi) !=
404 BTRFS_FILE_EXTENT_INLINE) {
405 extent_start =
406 btrfs_file_extent_disk_blocknr(fi);
407 extent_num_blocks =
408 btrfs_file_extent_disk_num_blocks(fi);
409 /* FIXME blocksize != 4096 */
410 inode->i_blocks -=
411 btrfs_file_extent_num_blocks(fi) << 3;
412 found_extent = 1;
413 }
414 }
415 ret = btrfs_del_item(trans, root, path);
416 BUG_ON(ret);
417 btrfs_release_path(root, path);
418 if (found_extent) {
419 ret = btrfs_free_extent(trans, root, extent_start,
420 extent_num_blocks, 0);
421 BUG_ON(ret);
422 }
423 }
424 ret = 0;
425 error:
426 btrfs_release_path(root, path);
427 btrfs_free_path(path);
428 inode->i_sb->s_dirt = 1;
429 return ret;
430 }
431
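/*
 * Final iput of an unlinked inode: truncate away any remaining file data
 * and delete the inode item, all inside a single transaction.
 */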
432 static void btrfs_delete_inode(struct inode *inode)
433 {
434 struct btrfs_trans_handle *trans;
435 struct btrfs_root *root = BTRFS_I(inode)->root;
436 int ret;
437
438 truncate_inode_pages(&inode->i_data, 0);
439 if (is_bad_inode(inode)) {
440 goto no_delete;
441 }
442 inode->i_size = 0;
443 mutex_lock(&root->fs_info->fs_mutex);
444 trans = btrfs_start_transaction(root, 1);
445 btrfs_set_trans_block_group(trans, inode);
446 if (S_ISREG(inode->i_mode)) {
447 ret = btrfs_truncate_in_trans(trans, root, inode);
448 BUG_ON(ret);
449 }
450 btrfs_free_inode(trans, root, inode);
451 btrfs_end_transaction(trans, root);
452 mutex_unlock(&root->fs_info->fs_mutex);
453 btrfs_btree_balance_dirty(root);
454 return;
455 no_delete:
456 clear_inode(inode);
457 }
458
459 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
460 struct btrfs_key *location)
461 {
462 const char *name = dentry->d_name.name;
463 int namelen = dentry->d_name.len;
464 struct btrfs_dir_item *di;
465 struct btrfs_path *path;
466 struct btrfs_root *root = BTRFS_I(dir)->root;
467 int ret;
468
469 path = btrfs_alloc_path();
470 BUG_ON(!path);
471 btrfs_init_path(path);
472 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
473 namelen, 0);
474 if (!di || IS_ERR(di)) {
475 location->objectid = 0;
476 ret = 0;
477 goto out;
478 }
479 btrfs_disk_key_to_cpu(location, &di->location);
480 out:
481 btrfs_release_path(root, path);
482 btrfs_free_path(path);
483 return ret;
484 }
485
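/*
 * Directory entries that point at a subvolume carry a ROOT_ITEM key.
 * Chase that key to the subvolume's own tree and rewrite *location to
 * name the root directory inode of that tree.
 *
 * FIXME: the IS_ERR() failure path below returns with fs_mutex held and
 * leaks the (otherwise unused) path.
 */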
486 static int fixup_tree_root_location(struct btrfs_root *root,
487 struct btrfs_key *location,
488 struct btrfs_root **sub_root)
489 {
490 struct btrfs_path *path;
491 struct btrfs_root_item *ri;
492
493 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
494 return 0;
495 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
496 return 0;
497
498 path = btrfs_alloc_path();
499 BUG_ON(!path);
500 mutex_lock(&root->fs_info->fs_mutex);
501
502 *sub_root = btrfs_read_fs_root(root->fs_info, location);
503 if (IS_ERR(*sub_root))
504 return PTR_ERR(*sub_root);
505
506 ri = &(*sub_root)->root_item;
507 location->objectid = btrfs_root_dirid(ri);
508 location->flags = 0;
509 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
510 location->offset = 0;
511
512 btrfs_free_path(path);
513 mutex_unlock(&root->fs_info->fs_mutex);
514 return 0;
515 }
516
517 static int btrfs_init_locked_inode(struct inode *inode, void *p)
518 {
519 struct btrfs_iget_args *args = p;
520 inode->i_ino = args->ino;
521 BTRFS_I(inode)->root = args->root;
522 return 0;
523 }
524
525 static int btrfs_find_actor(struct inode *inode, void *opaque)
526 {
527 struct btrfs_iget_args *args = opaque;
528 return (args->ino == inode->i_ino &&
529 args->root == BTRFS_I(inode)->root);
530 }
531
532 static struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
533 struct btrfs_root *root)
534 {
535 struct inode *inode;
536 struct btrfs_iget_args args;
537 args.ino = objectid;
538 args.root = root;
539
540 inode = iget5_locked(s, objectid, btrfs_find_actor,
541 btrfs_init_locked_inode,
542 (void *)&args);
543 return inode;
544 }
545
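/*
 * Directory lookup: translate the name into a key, follow the key into a
 * subvolume root when it points at one, and read in the inode it names.
 */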
546 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
547 struct nameidata *nd)
548 {
549 struct inode * inode;
550 struct btrfs_inode *bi = BTRFS_I(dir);
551 struct btrfs_root *root = bi->root;
552 struct btrfs_root *sub_root = root;
553 struct btrfs_key location;
554 int ret;
555
556 if (dentry->d_name.len > BTRFS_NAME_LEN)
557 return ERR_PTR(-ENAMETOOLONG);
558 mutex_lock(&root->fs_info->fs_mutex);
559 ret = btrfs_inode_by_name(dir, dentry, &location);
560 mutex_unlock(&root->fs_info->fs_mutex);
561 if (ret < 0)
562 return ERR_PTR(ret);
563 inode = NULL;
564 if (location.objectid) {
565 ret = fixup_tree_root_location(root, &location, &sub_root);
566 if (ret < 0)
567 return ERR_PTR(ret);
568 if (ret > 0)
569 return ERR_PTR(-ENOENT);
570 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
571 sub_root);
572 if (!inode)
573 return ERR_PTR(-EACCES);
574 if (inode->i_state & I_NEW) {
575 if (sub_root != root) {
576 printk("adding new root for inode %lu root %p (found %p)\n", inode->i_ino, sub_root, BTRFS_I(inode)->root);
577 igrab(inode);
578 sub_root->inode = inode;
579 }
580 BTRFS_I(inode)->root = sub_root;
581 memcpy(&BTRFS_I(inode)->location, &location,
582 sizeof(location));
583 btrfs_read_locked_inode(inode);
584 unlock_new_inode(inode);
585 }
586 }
587 return d_splice_alias(inode, dentry);
588 }
589
590 static void reada_leaves(struct btrfs_root *root, struct btrfs_path *path)
591 {
592 struct btrfs_node *node;
593 int i;
594 int nritems;
595 u64 objectid;
596 u64 item_objectid;
597 u64 blocknr;
598 int slot;
599
600 if (!path->nodes[1])
601 return;
602 node = btrfs_buffer_node(path->nodes[1]);
603 slot = path->slots[1];
604 objectid = btrfs_disk_key_objectid(&node->ptrs[slot].key);
605 nritems = btrfs_header_nritems(&node->header);
606 for (i = slot; i < nritems; i++) {
607 item_objectid = btrfs_disk_key_objectid(&node->ptrs[i].key);
608 if (item_objectid != objectid)
609 break;
610 blocknr = btrfs_node_blockptr(node, i);
611 readahead_tree_block(root, blocknr);
612 }
613 }
614
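/*
 * readdir: walk the DIR_INDEX items (DIR_ITEM for the tree root) of this
 * directory starting at f_pos and hand every name found to filldir.
 */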
615 static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
616 {
617 struct inode *inode = filp->f_path.dentry->d_inode;
618 struct btrfs_root *root = BTRFS_I(inode)->root;
619 struct btrfs_item *item;
620 struct btrfs_dir_item *di;
621 struct btrfs_key key;
622 struct btrfs_path *path;
623 int ret;
624 u32 nritems;
625 struct btrfs_leaf *leaf;
626 int slot;
627 int advance;
628 unsigned char d_type = DT_UNKNOWN;
629 int over = 0;
630 u32 di_cur;
631 u32 di_total;
632 u32 di_len;
633 int key_type = BTRFS_DIR_INDEX_KEY;
634
635 /* FIXME, use a real flag for deciding about the key type */
636 if (root->fs_info->tree_root == root)
637 key_type = BTRFS_DIR_ITEM_KEY;
638 mutex_lock(&root->fs_info->fs_mutex);
639 key.objectid = inode->i_ino;
640 key.flags = 0;
641 btrfs_set_key_type(&key, key_type);
642 key.offset = filp->f_pos;
643 path = btrfs_alloc_path();
644 btrfs_init_path(path);
645 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
646 if (ret < 0)
647 goto err;
648 advance = 0;
649 reada_leaves(root, path);
650 while(1) {
651 leaf = btrfs_buffer_leaf(path->nodes[0]);
652 nritems = btrfs_header_nritems(&leaf->header);
653 slot = path->slots[0];
654 if (advance || slot >= nritems) {
655 if (slot >= nritems -1) {
656 ret = btrfs_next_leaf(root, path);
657 if (ret)
658 break;
659 leaf = btrfs_buffer_leaf(path->nodes[0]);
660 nritems = btrfs_header_nritems(&leaf->header);
661 slot = path->slots[0];
662 if (path->slots[1] == 0)
663 reada_leaves(root, path);
664 } else {
665 slot++;
666 path->slots[0]++;
667 }
668 }
669 advance = 1;
670 item = leaf->items + slot;
671 if (btrfs_disk_key_objectid(&item->key) != key.objectid)
672 break;
673 if (btrfs_disk_key_type(&item->key) != key_type)
674 break;
675 if (btrfs_disk_key_offset(&item->key) < filp->f_pos)
676 continue;
677 filp->f_pos = btrfs_disk_key_offset(&item->key);
678 advance = 1;
679 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
680 di_cur = 0;
681 di_total = btrfs_item_size(leaf->items + slot);
682 while(di_cur < di_total) {
683 over = filldir(dirent, (const char *)(di + 1),
684 btrfs_dir_name_len(di),
685 btrfs_disk_key_offset(&item->key),
686 btrfs_disk_key_objectid(&di->location),
687 d_type);
688 if (over)
689 goto nopos;
690 di_len = btrfs_dir_name_len(di) + sizeof(*di);
691 di_cur += di_len;
692 di = (struct btrfs_dir_item *)((char *)di + di_len);
693 }
694 }
695 filp->f_pos++;
696 nopos:
697 ret = 0;
698 err:
699 btrfs_release_path(root, path);
700 btrfs_free_path(path);
701 mutex_unlock(&root->fs_info->fs_mutex);
702 return ret;
703 }
704
705 static void btrfs_put_super (struct super_block * sb)
706 {
707 struct btrfs_root *root = btrfs_sb(sb);
708 int ret;
709
710 ret = close_ctree(root);
711 if (ret) {
712 printk("close ctree returns %d\n", ret);
713 }
714 sb->s_fs_info = NULL;
715 }
716
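/*
 * Mount time setup: open the tree roots, read the root directory inode
 * named by the super block and hang the root dentry off it.
 */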
717 static int btrfs_fill_super(struct super_block * sb, void * data, int silent)
718 {
719 struct inode * inode;
720 struct dentry * root_dentry;
721 struct btrfs_super_block *disk_super;
722 struct btrfs_root *tree_root;
723 struct btrfs_inode *bi;
724
725 sb->s_maxbytes = MAX_LFS_FILESIZE;
726 sb->s_magic = BTRFS_SUPER_MAGIC;
727 sb->s_op = &btrfs_super_ops;
728 sb->s_time_gran = 1;
729
730 tree_root = open_ctree(sb);
731
732 if (!tree_root) {
733 printk("btrfs: open_ctree failed\n");
734 return -EIO;
735 }
736 sb->s_fs_info = tree_root;
737 disk_super = tree_root->fs_info->disk_super;
738 printk("read in super total blocks %Lu root %Lu\n",
739 btrfs_super_total_blocks(disk_super),
740 btrfs_super_root_dir(disk_super));
741
742 inode = btrfs_iget_locked(sb, btrfs_super_root_dir(disk_super),
743 tree_root);
744 if (!inode)
745 return -ENOMEM;
746 bi = BTRFS_I(inode);
747 bi->location.objectid = inode->i_ino;
748 bi->location.offset = 0;
749 bi->location.flags = 0;
750 bi->root = tree_root;
751 btrfs_set_key_type(&bi->location, BTRFS_INODE_ITEM_KEY);
752 
753 if (inode->i_state & I_NEW) {
754 btrfs_read_locked_inode(inode);
755 unlock_new_inode(inode);
756 }
757
758 root_dentry = d_alloc_root(inode);
759 if (!root_dentry) {
760 iput(inode);
761 return -ENOMEM;
762 }
763 sb->s_root = root_dentry;
764
765 return 0;
766 }
767
768 static int btrfs_write_inode(struct inode *inode, int wait)
769 {
770 struct btrfs_root *root = BTRFS_I(inode)->root;
771 struct btrfs_trans_handle *trans;
772 int ret = 0;
773
774 if (wait) {
775 mutex_lock(&root->fs_info->fs_mutex);
776 trans = btrfs_start_transaction(root, 1);
777 btrfs_set_trans_block_group(trans, inode);
778 ret = btrfs_commit_transaction(trans, root);
779 mutex_unlock(&root->fs_info->fs_mutex);
780 }
781 return ret;
782 }
783
784 static void btrfs_dirty_inode(struct inode *inode)
785 {
786 struct btrfs_root *root = BTRFS_I(inode)->root;
787 struct btrfs_trans_handle *trans;
788
789 mutex_lock(&root->fs_info->fs_mutex);
790 trans = btrfs_start_transaction(root, 1);
791 btrfs_set_trans_block_group(trans, inode);
792 btrfs_update_inode(trans, root, inode);
793 btrfs_end_transaction(trans, root);
794 mutex_unlock(&root->fs_info->fs_mutex);
795 btrfs_btree_balance_dirty(root);
796 }
797
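/*
 * Allocate a new in-memory inode, pick a block group for it to live in
 * and insert the matching inode item into the tree.
 */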
798 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
799 struct btrfs_root *root,
800 u64 objectid,
801 struct btrfs_block_group_cache *group,
802 int mode)
803 {
804 struct inode *inode;
805 struct btrfs_inode_item inode_item;
806 struct btrfs_key *location;
807 int ret;
808
809 inode = new_inode(root->fs_info->sb);
810 if (!inode)
811 return ERR_PTR(-ENOMEM);
812
813 BTRFS_I(inode)->root = root;
814 group = btrfs_find_block_group(root, group, 0);
815 BTRFS_I(inode)->block_group = group;
816
817 inode->i_uid = current->fsuid;
818 inode->i_gid = current->fsgid;
819 inode->i_mode = mode;
820 inode->i_ino = objectid;
821 inode->i_blocks = 0;
822 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
823 fill_inode_item(&inode_item, inode);
824 location = &BTRFS_I(inode)->location;
825 location->objectid = objectid;
826 location->flags = 0;
827 location->offset = 0;
828 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
829
830 ret = btrfs_insert_inode(trans, root, objectid, &inode_item);
831 BUG_ON(ret);
832
833 insert_inode_hash(inode);
834 return inode;
835 }
836
837 static int btrfs_add_link(struct btrfs_trans_handle *trans,
838 struct dentry *dentry, struct inode *inode)
839 {
840 int ret;
841 struct btrfs_key key;
842 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
843 key.objectid = inode->i_ino;
844 key.flags = 0;
845 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
846 key.offset = 0;
847
848 ret = btrfs_insert_dir_item(trans, root,
849 dentry->d_name.name, dentry->d_name.len,
850 dentry->d_parent->d_inode->i_ino,
851 &key, 0);
852 if (ret == 0) {
853 dentry->d_parent->d_inode->i_size += dentry->d_name.len * 2;
854 ret = btrfs_update_inode(trans, root,
855 dentry->d_parent->d_inode);
856 }
857 return ret;
858 }
859
860 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
861 struct dentry *dentry, struct inode *inode)
862 {
863 int err = btrfs_add_link(trans, dentry, inode);
864 if (!err) {
865 d_instantiate(dentry, inode);
866 return 0;
867 }
868 if (err > 0)
869 err = -EEXIST;
870 return err;
871 }
872
873 static int btrfs_create(struct inode *dir, struct dentry *dentry,
874 int mode, struct nameidata *nd)
875 {
876 struct btrfs_trans_handle *trans;
877 struct btrfs_root *root = BTRFS_I(dir)->root;
878 struct inode *inode;
879 int err;
880 int drop_inode = 0;
881 u64 objectid;
882
883 mutex_lock(&root->fs_info->fs_mutex);
884 trans = btrfs_start_transaction(root, 1);
885 btrfs_set_trans_block_group(trans, dir);
886
887 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
888 if (err) {
889 err = -ENOSPC;
890 goto out_unlock;
891 }
892
893 inode = btrfs_new_inode(trans, root, objectid,
894 BTRFS_I(dir)->block_group, mode);
895 err = PTR_ERR(inode);
896 if (IS_ERR(inode))
897 goto out_unlock;
898
899 btrfs_set_trans_block_group(trans, inode);
900 err = btrfs_add_nondir(trans, dentry, inode);
901 if (err)
902 drop_inode = 1;
903 else {
904 inode->i_mapping->a_ops = &btrfs_aops;
905 inode->i_fop = &btrfs_file_operations;
906 inode->i_op = &btrfs_file_inode_operations;
907 }
908 dir->i_sb->s_dirt = 1;
909 btrfs_update_inode_block_group(trans, inode);
910 btrfs_update_inode_block_group(trans, dir);
911 out_unlock:
912 btrfs_end_transaction(trans, root);
913 mutex_unlock(&root->fs_info->fs_mutex);
914
915 if (drop_inode) {
916 inode_dec_link_count(inode);
917 iput(inode);
918 }
919 btrfs_btree_balance_dirty(root);
920 return err;
921 }
922
923 static int btrfs_make_empty_dir(struct btrfs_trans_handle *trans,
924 struct btrfs_root *root,
925 u64 objectid, u64 dirid)
926 {
927 int ret;
928 char buf[2];
929 struct btrfs_key key;
930
931 buf[0] = '.';
932 buf[1] = '.';
933
934 key.objectid = objectid;
935 key.offset = 0;
936 key.flags = 0;
937 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
938
939 ret = btrfs_insert_dir_item(trans, root, buf, 1, objectid,
940 &key, 1);
941 if (ret)
942 goto error;
943 key.objectid = dirid;
944 ret = btrfs_insert_dir_item(trans, root, buf, 2, objectid,
945 &key, 1);
946 if (ret)
947 goto error;
948 error:
949 return ret;
950 }
951
952 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
953 {
954 struct inode *inode;
955 struct btrfs_trans_handle *trans;
956 struct btrfs_root *root = BTRFS_I(dir)->root;
957 int err = 0;
958 int drop_on_err = 0;
959 u64 objectid;
960
961 mutex_lock(&root->fs_info->fs_mutex);
962 trans = btrfs_start_transaction(root, 1);
963 btrfs_set_trans_block_group(trans, dir);
964 if (IS_ERR(trans)) {
965 err = PTR_ERR(trans);
966 goto out_unlock;
967 }
968
969 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
970 if (err) {
971 err = -ENOSPC;
972 goto out_unlock;
973 }
974
975 inode = btrfs_new_inode(trans, root, objectid,
976 BTRFS_I(dir)->block_group, S_IFDIR | mode);
977 if (IS_ERR(inode)) {
978 err = PTR_ERR(inode);
979 goto out_fail;
980 }
981 drop_on_err = 1;
982 inode->i_op = &btrfs_dir_inode_operations;
983 inode->i_fop = &btrfs_dir_file_operations;
984 btrfs_set_trans_block_group(trans, inode);
985
986 err = btrfs_make_empty_dir(trans, root, inode->i_ino, dir->i_ino);
987 if (err)
988 goto out_fail;
989
990 inode->i_size = 6;
991 err = btrfs_update_inode(trans, root, inode);
992 if (err)
993 goto out_fail;
994 err = btrfs_add_link(trans, dentry, inode);
995 if (err)
996 goto out_fail;
997 d_instantiate(dentry, inode);
998 drop_on_err = 0;
999 dir->i_sb->s_dirt = 1;
1000 btrfs_update_inode_block_group(trans, inode);
1001 btrfs_update_inode_block_group(trans, dir);
1002
1003 out_fail:
1004 btrfs_end_transaction(trans, root);
1005 out_unlock:
1006 mutex_unlock(&root->fs_info->fs_mutex);
1007 if (drop_on_err)
1008 iput(inode);
1009 btrfs_btree_balance_dirty(root);
1010 return err;
1011 }
1012
1013 static int btrfs_sync_file(struct file *file,
1014 struct dentry *dentry, int datasync)
1015 {
1016 struct inode *inode = dentry->d_inode;
1017 struct btrfs_root *root = BTRFS_I(inode)->root;
1018 int ret;
1019 struct btrfs_trans_handle *trans;
1020
1021 mutex_lock(&root->fs_info->fs_mutex);
1022 trans = btrfs_start_transaction(root, 1);
1023 if (!trans) {
1024 ret = -ENOMEM;
1025 goto out;
1026 }
1027 ret = btrfs_commit_transaction(trans, root);
1028 mutex_unlock(&root->fs_info->fs_mutex);
1029 out:
1030 return ret > 0 ? -EIO : ret;
1031 }
1032
1033 static int btrfs_sync_fs(struct super_block *sb, int wait)
1034 {
1035 struct btrfs_trans_handle *trans;
1036 struct btrfs_root *root;
1037 int ret;
1038 root = btrfs_sb(sb);
1039
1040 sb->s_dirt = 0;
1041 if (!wait) {
1042 filemap_flush(root->fs_info->btree_inode->i_mapping);
1043 return 0;
1044 }
1045 mutex_lock(&root->fs_info->fs_mutex);
1046 trans = btrfs_start_transaction(root, 1);
1047 ret = btrfs_commit_transaction(trans, root);
1048 sb->s_dirt = 0;
1049 BUG_ON(ret);
1050 printk("btrfs sync_fs\n");
1051 mutex_unlock(&root->fs_info->fs_mutex);
1052 return 0;
1053 }
1054
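/*
 * Map a file block to a disk block by searching the file's extent items.
 * Regular extents are mapped directly; inline extents are copied into the
 * page here and mapped to block zero.  Expects fs_mutex to be held.
 */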
1055 static int btrfs_get_block_lock(struct inode *inode, sector_t iblock,
1056 struct buffer_head *result, int create)
1057 {
1058 int ret;
1059 int err = 0;
1060 u64 blocknr;
1061 u64 extent_start = 0;
1062 u64 extent_end = 0;
1063 u64 objectid = inode->i_ino;
1064 u32 found_type;
1065 struct btrfs_path *path;
1066 struct btrfs_root *root = BTRFS_I(inode)->root;
1067 struct btrfs_file_extent_item *item;
1068 struct btrfs_leaf *leaf;
1069 struct btrfs_disk_key *found_key;
1070
1071 path = btrfs_alloc_path();
1072 BUG_ON(!path);
1073 btrfs_init_path(path);
1074 if (create) {
1075 WARN_ON(1);
1076 }
1077
1078 ret = btrfs_lookup_file_extent(NULL, root, path,
1079 inode->i_ino,
1080 iblock << inode->i_blkbits, 0);
1081 if (ret < 0) {
1082 err = ret;
1083 goto out;
1084 }
1085
1086 if (ret != 0) {
1087 if (path->slots[0] == 0) {
1088 btrfs_release_path(root, path);
1089 goto out;
1090 }
1091 path->slots[0]--;
1092 }
1093
1094 item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]), path->slots[0],
1095 struct btrfs_file_extent_item);
1096 leaf = btrfs_buffer_leaf(path->nodes[0]);
1097 blocknr = btrfs_file_extent_disk_blocknr(item);
1098 blocknr += btrfs_file_extent_offset(item);
1099
1100 /* are we inside the extent that was found? */
1101 found_key = &leaf->items[path->slots[0]].key;
1102 found_type = btrfs_disk_key_type(found_key);
1103 if (btrfs_disk_key_objectid(found_key) != objectid ||
1104 found_type != BTRFS_EXTENT_DATA_KEY) {
1105 extent_end = 0;
1106 extent_start = 0;
1107 goto out;
1108 }
1109 found_type = btrfs_file_extent_type(item);
1110 extent_start = btrfs_disk_key_offset(&leaf->items[path->slots[0]].key);
1111 if (found_type == BTRFS_FILE_EXTENT_REG) {
1112 extent_start = extent_start >> inode->i_blkbits;
1113 extent_end = extent_start + btrfs_file_extent_num_blocks(item);
1114 if (iblock >= extent_start && iblock < extent_end) {
1115 err = 0;
1116 btrfs_map_bh_to_logical(root, result, blocknr +
1117 iblock - extent_start);
1118 goto out;
1119 }
1120 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
1121 char *ptr;
1122 char *map;
1123 u32 size;
1124 size = btrfs_file_extent_inline_len(leaf->items +
1125 path->slots[0]);
1126 extent_end = (extent_start + size) >> inode->i_blkbits;
1127 extent_start >>= inode->i_blkbits;
1128 if (iblock < extent_start || iblock > extent_end) {
1129 goto out;
1130 }
1131 ptr = btrfs_file_extent_inline_start(item);
1132 map = kmap(result->b_page);
1133 memcpy(map, ptr, size);
1134 memset(map + size, 0, PAGE_CACHE_SIZE - size);
1135 flush_dcache_page(result->b_page);
1136 kunmap(result->b_page);
1137 set_buffer_uptodate(result);
1138 SetPageChecked(result->b_page);
1139 btrfs_map_bh_to_logical(root, result, 0);
1140 }
1141 out:
1142 btrfs_free_path(path);
1143 return err;
1144 }
1145
1146 static int btrfs_get_block(struct inode *inode, sector_t iblock,
1147 struct buffer_head *result, int create)
1148 {
1149 int err;
1150 struct btrfs_root *root = BTRFS_I(inode)->root;
1151 mutex_lock(&root->fs_info->fs_mutex);
1152 err = btrfs_get_block_lock(inode, iblock, result, create);
1153 mutex_unlock(&root->fs_info->fs_mutex);
1154 return err;
1155 }
1156
1157 static int btrfs_prepare_write(struct file *file, struct page *page,
1158 unsigned from, unsigned to)
1159 {
1160 return nobh_prepare_write(page, from, to, btrfs_get_block);
1161 }
1162
1163 static void btrfs_write_super(struct super_block *sb)
1164 {
1165 btrfs_sync_fs(sb, 1);
1166 }
1167
1168 static int btrfs_readpage(struct file *file, struct page *page)
1169 {
1170 return mpage_readpage(page, btrfs_get_block);
1171 }
1172
1173 /*
1174 * While block_write_full_page is writing back the dirty buffers under
1175 * the page lock, whoever dirtied the buffers may decide to clean them
1176 * again at any time. We handle that by only looking at the buffer
1177 * state inside lock_buffer().
1178 *
1179 * If block_write_full_page() is called for regular writeback
1180 * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
1181 * locked buffer. This only can happen if someone has written the buffer
1182 * directly, with submit_bh(). At the address_space level PageWriteback
1183 * prevents this contention from occurring.
1184 */
1185 static int __btrfs_write_full_page(struct inode *inode, struct page *page,
1186 struct writeback_control *wbc)
1187 {
1188 int err;
1189 sector_t block;
1190 sector_t last_block;
1191 struct buffer_head *bh, *head;
1192 const unsigned blocksize = 1 << inode->i_blkbits;
1193 int nr_underway = 0;
1194
1195 BUG_ON(!PageLocked(page));
1196
1197 last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
1198
1199 if (!page_has_buffers(page)) {
1200 create_empty_buffers(page, blocksize,
1201 (1 << BH_Dirty)|(1 << BH_Uptodate));
1202 }
1203
1204 /*
1205 * Be very careful. We have no exclusion from __set_page_dirty_buffers
1206 * here, and the (potentially unmapped) buffers may become dirty at
1207 * any time. If a buffer becomes dirty here after we've inspected it
1208 * then we just miss that fact, and the page stays dirty.
1209 *
1210 * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
1211 * handle that here by just cleaning them.
1212 */
1213
1214 block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1215 head = page_buffers(page);
1216 bh = head;
1217
1218 /*
1219 * Get all the dirty buffers mapped to disk addresses and
1220 * handle any aliases from the underlying blockdev's mapping.
1221 */
1222 do {
1223 if (block > last_block) {
1224 /*
1225 * mapped buffers outside i_size will occur, because
1226 * this page can be outside i_size when there is a
1227 * truncate in progress.
1228 */
1229 /*
1230 * The buffer was zeroed by block_write_full_page()
1231 */
1232 clear_buffer_dirty(bh);
1233 set_buffer_uptodate(bh);
1234 } else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
1235 WARN_ON(bh->b_size != blocksize);
1236 err = btrfs_get_block(inode, block, bh, 0);
1237 if (err) {
1238 printk("writepage going to recovery err %d\n", err);
1239 goto recover;
1240 }
1241 if (buffer_new(bh)) {
1242 /* blockdev mappings never come here */
1243 clear_buffer_new(bh);
1244 }
1245 }
1246 bh = bh->b_this_page;
1247 block++;
1248 } while (bh != head);
1249
1250 do {
1251 if (!buffer_mapped(bh))
1252 continue;
1253 /*
1254 * If it's a fully non-blocking write attempt and we cannot
1255 * lock the buffer then redirty the page. Note that this can
1256 * potentially cause a busy-wait loop from pdflush and kswapd
1257 * activity, but those code paths have their own higher-level
1258 * throttling.
1259 */
1260 if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
1261 lock_buffer(bh);
1262 } else if (test_set_buffer_locked(bh)) {
1263 redirty_page_for_writepage(wbc, page);
1264 continue;
1265 }
1266 if (test_clear_buffer_dirty(bh) && bh->b_blocknr != 0) {
1267 mark_buffer_async_write(bh);
1268 } else {
1269 unlock_buffer(bh);
1270 }
1271 } while ((bh = bh->b_this_page) != head);
1272
1273 /*
1274 * The page and its buffers are protected by PageWriteback(), so we can
1275 * drop the bh refcounts early.
1276 */
1277 BUG_ON(PageWriteback(page));
1278 set_page_writeback(page);
1279
1280 do {
1281 struct buffer_head *next = bh->b_this_page;
1282 if (buffer_async_write(bh)) {
1283 submit_bh(WRITE, bh);
1284 nr_underway++;
1285 }
1286 bh = next;
1287 } while (bh != head);
1288 unlock_page(page);
1289
1290 err = 0;
1291 done:
1292 if (nr_underway == 0) {
1293 /*
1294 * The page was marked dirty, but the buffers were
1295 * clean. Someone wrote them back by hand with
1296 * ll_rw_block/submit_bh. A rare case.
1297 */
1298 int uptodate = 1;
1299 do {
1300 if (!buffer_uptodate(bh)) {
1301 uptodate = 0;
1302 break;
1303 }
1304 bh = bh->b_this_page;
1305 } while (bh != head);
1306 if (uptodate)
1307 SetPageUptodate(page);
1308 end_page_writeback(page);
1309 }
1310 return err;
1311
1312 recover:
1313 /*
1314 * ENOSPC, or some other error. We may already have added some
1315 * blocks to the file, so we need to write these out to avoid
1316 * exposing stale data.
1317 * The page is currently locked and not marked for writeback
1318 */
1319 bh = head;
1320 /* Recovery: lock and submit the mapped buffers */
1321 do {
1322 if (buffer_mapped(bh) && buffer_dirty(bh)) {
1323 lock_buffer(bh);
1324 mark_buffer_async_write(bh);
1325 } else {
1326 /*
1327 * The buffer may have been set dirty during
1328 * attachment to a dirty page.
1329 */
1330 clear_buffer_dirty(bh);
1331 }
1332 } while ((bh = bh->b_this_page) != head);
1333 SetPageError(page);
1334 BUG_ON(PageWriteback(page));
1335 set_page_writeback(page);
1336 do {
1337 struct buffer_head *next = bh->b_this_page;
1338 if (buffer_async_write(bh)) {
1339 clear_buffer_dirty(bh);
1340 submit_bh(WRITE, bh);
1341 nr_underway++;
1342 }
1343 bh = next;
1344 } while (bh != head);
1345 unlock_page(page);
1346 goto done;
1347 }
1348
1349 /*
1350 * The generic ->writepage function for buffer-backed address_spaces
1351 */
1352 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
1353 {
1354 struct inode * const inode = page->mapping->host;
1355 loff_t i_size = i_size_read(inode);
1356 const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT;
1357 unsigned offset;
1358 void *kaddr;
1359
1360 /* Is the page fully inside i_size? */
1361 if (page->index < end_index)
1362 return __btrfs_write_full_page(inode, page, wbc);
1363
1364 /* Is the page fully outside i_size? (truncate in progress) */
1365 offset = i_size & (PAGE_CACHE_SIZE-1);
1366 if (page->index >= end_index+1 || !offset) {
1367 /*
1368 * The page may have dirty, unmapped buffers. For example,
1369 * they may have been added in ext3_writepage(). Make them
1370 * freeable here, so the page does not leak.
1371 */
1372 block_invalidatepage(page, 0);
1373 unlock_page(page);
1374 return 0; /* don't care */
1375 }
1376
1377 /*
1378 * The page straddles i_size. It must be zeroed out on each and every
1379 * writepage invocation because it may be mmapped. "A file is mapped
1380 * in multiples of the page size. For a file that is not a multiple of
1381 * the page size, the remaining memory is zeroed when mapped, and
1382 * writes to that region are not written out to the file."
1383 */
1384 kaddr = kmap_atomic(page, KM_USER0);
1385 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
1386 flush_dcache_page(page);
1387 kunmap_atomic(kaddr, KM_USER0);
1388 return __btrfs_write_full_page(inode, page, wbc);
1389 }
1390
1391 static void btrfs_truncate(struct inode *inode)
1392 {
1393 struct btrfs_root *root = BTRFS_I(inode)->root;
1394 int ret;
1395 struct btrfs_trans_handle *trans;
1396
1397 if (!S_ISREG(inode->i_mode))
1398 return;
1399 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
1400 return;
1401
1402 nobh_truncate_page(inode->i_mapping, inode->i_size);
1403
1404 /* FIXME, add redo link to tree so we don't leak on crash */
1405 mutex_lock(&root->fs_info->fs_mutex);
1406 trans = btrfs_start_transaction(root, 1);
1407 btrfs_set_trans_block_group(trans, inode);
1408 ret = btrfs_truncate_in_trans(trans, root, inode);
1409 BUG_ON(ret);
1410 btrfs_update_inode(trans, root, inode);
1411 ret = btrfs_end_transaction(trans, root);
1412 BUG_ON(ret);
1413 mutex_unlock(&root->fs_info->fs_mutex);
1414 btrfs_btree_balance_dirty(root);
1415 }
1416
1417 /*
1418 * Make sure any changes to nobh_commit_write() are reflected in
1419 * nobh_truncate_page(), since it doesn't call commit_write().
1420 */
1421 static int btrfs_commit_write(struct file *file, struct page *page,
1422 unsigned from, unsigned to)
1423 {
1424 struct inode *inode = page->mapping->host;
1425 struct buffer_head *bh;
1426 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1427
1428 SetPageUptodate(page);
1429 bh = page_buffers(page);
1430 if (buffer_mapped(bh) && bh->b_blocknr != 0) {
1431 set_page_dirty(page);
1432 }
1433 if (pos > inode->i_size) {
1434 i_size_write(inode, pos);
1435 mark_inode_dirty(inode);
1436 }
1437 return 0;
1438 }
1439
1440 static int btrfs_copy_from_user(loff_t pos, int num_pages, int write_bytes,
1441 struct page **prepared_pages,
1442 const char __user * buf)
1443 {
1444 long page_fault = 0;
1445 int i;
1446 int offset = pos & (PAGE_CACHE_SIZE - 1);
1447
1448 for (i = 0; i < num_pages && write_bytes > 0; i++, offset = 0) {
1449 size_t count = min_t(size_t,
1450 PAGE_CACHE_SIZE - offset, write_bytes);
1451 struct page *page = prepared_pages[i];
1452 fault_in_pages_readable(buf, count);
1453
1454 /* Copy data from userspace to the current page */
1455 kmap(page);
1456 page_fault = __copy_from_user(page_address(page) + offset,
1457 buf, count);
1458 /* Flush processor's dcache for this page */
1459 flush_dcache_page(page);
1460 kunmap(page);
1461 buf += count;
1462 write_bytes -= count;
1463
1464 if (page_fault)
1465 break;
1466 }
1467 return page_fault ? -EFAULT : 0;
1468 }
1469
1470 static void btrfs_drop_pages(struct page **pages, size_t num_pages)
1471 {
1472 size_t i;
1473 for (i = 0; i < num_pages; i++) {
1474 if (!pages[i])
1475 break;
1476 unlock_page(pages[i]);
1477 mark_page_accessed(pages[i]);
1478 page_cache_release(pages[i]);
1479 }
1480 }
1481 static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
1482 struct btrfs_root *root,
1483 struct file *file,
1484 struct page **pages,
1485 size_t num_pages,
1486 loff_t pos,
1487 size_t write_bytes)
1488 {
1489 int i;
1490 int offset;
1491 int err = 0;
1492 int ret;
1493 int this_write;
1494 struct inode *inode = file->f_path.dentry->d_inode;
1495 struct buffer_head *bh;
1496 struct btrfs_file_extent_item *ei;
1497
1498 for (i = 0; i < num_pages; i++) {
1499 offset = pos & (PAGE_CACHE_SIZE -1);
1500 this_write = min(PAGE_CACHE_SIZE - offset, write_bytes);
1501 /* FIXME, one block at a time */
1502
1503 mutex_lock(&root->fs_info->fs_mutex);
1504 trans = btrfs_start_transaction(root, 1);
1505 btrfs_set_trans_block_group(trans, inode);
1506
1507 bh = page_buffers(pages[i]);
1508 if (buffer_mapped(bh) && bh->b_blocknr == 0) {
1509 struct btrfs_key key;
1510 struct btrfs_path *path;
1511 char *ptr;
1512 u32 datasize;
1513
1514 path = btrfs_alloc_path();
1515 BUG_ON(!path);
1516 key.objectid = inode->i_ino;
1517 key.offset = pages[i]->index << PAGE_CACHE_SHIFT;
1518 key.flags = 0;
1519 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
1520 BUG_ON(write_bytes >= PAGE_CACHE_SIZE);
1521 datasize = offset +
1522 btrfs_file_extent_calc_inline_size(write_bytes);
1523 ret = btrfs_insert_empty_item(trans, root, path, &key,
1524 datasize);
1525 BUG_ON(ret);
1526 ei = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
1527 path->slots[0], struct btrfs_file_extent_item);
1528 btrfs_set_file_extent_generation(ei, trans->transid);
1529 btrfs_set_file_extent_type(ei,
1530 BTRFS_FILE_EXTENT_INLINE);
1531 ptr = btrfs_file_extent_inline_start(ei);
1532 memcpy(ptr, bh->b_data, offset + write_bytes);
1533 mark_buffer_dirty(path->nodes[0]);
1534 btrfs_free_path(path);
1535 } else {
1536 btrfs_csum_file_block(trans, root, inode->i_ino,
1537 pages[i]->index << PAGE_CACHE_SHIFT,
1538 kmap(pages[i]), PAGE_CACHE_SIZE);
1539 kunmap(pages[i]);
1540 }
1541 SetPageChecked(pages[i]);
1542 btrfs_update_inode_block_group(trans, inode);
1543 ret = btrfs_end_transaction(trans, root);
1544 BUG_ON(ret);
1545 mutex_unlock(&root->fs_info->fs_mutex);
1546
1547 ret = btrfs_commit_write(file, pages[i], offset,
1548 offset + this_write);
1549 pos += this_write;
1550 if (ret) {
1551 err = ret;
1552 goto failed;
1553 }
1554 WARN_ON(this_write > write_bytes);
1555 write_bytes -= this_write;
1556 }
1557 failed:
1558 return err;
1559 }
1560
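/*
 * Remove or trim any file extent items overlapping [start, end) before a
 * write lands on top of them.  Extents that begin before the range are
 * truncated in place; extents that continue past the end have a trailing
 * "bookend" extent item inserted so the tail keeps its data.
 */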
1561 static int drop_extents(struct btrfs_trans_handle *trans,
1562 struct btrfs_root *root,
1563 struct inode *inode,
1564 u64 start, u64 end)
1565 {
1566 int ret;
1567 struct btrfs_key key;
1568 struct btrfs_leaf *leaf;
1569 int slot;
1570 struct btrfs_file_extent_item *extent;
1571 u64 extent_end = 0;
1572 int keep;
1573 struct btrfs_file_extent_item old;
1574 struct btrfs_path *path;
1575 u64 search_start = start;
1576 int bookend;
1577 int found_type;
1578 int found_extent;
1579 int found_inline;
1580
1581 path = btrfs_alloc_path();
1582 if (!path)
1583 return -ENOMEM;
1584 while(1) {
1585 btrfs_release_path(root, path);
1586 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
1587 search_start, -1);
1588 if (ret < 0)
1589 goto out;
1590 if (ret > 0) {
1591 if (path->slots[0] == 0) {
1592 ret = 0;
1593 goto out;
1594 }
1595 path->slots[0]--;
1596 }
1597 keep = 0;
1598 bookend = 0;
1599 found_extent = 0;
1600 found_inline = 0;
1601 extent = NULL;
1602 leaf = btrfs_buffer_leaf(path->nodes[0]);
1603 slot = path->slots[0];
1604 btrfs_disk_key_to_cpu(&key, &leaf->items[slot].key);
1605 if (key.offset >= end || key.objectid != inode->i_ino) {
1606 ret = 0;
1607 goto out;
1608 }
1609 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) {
1610 ret = 0;
1611 goto out;
1612 }
1613 extent = btrfs_item_ptr(leaf, slot,
1614 struct btrfs_file_extent_item);
1615 found_type = btrfs_file_extent_type(extent);
1616 if (found_type == BTRFS_FILE_EXTENT_REG) {
1617 extent_end = key.offset +
1618 (btrfs_file_extent_num_blocks(extent) <<
1619 inode->i_blkbits);
1620 found_extent = 1;
1621 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
1622 found_inline = 1;
1623 extent_end = key.offset +
1624 btrfs_file_extent_inline_len(leaf->items + slot);
1625 }
1626
1627 if (!found_extent && !found_inline) {
1628 ret = 0;
1629 goto out;
1630 }
1631
1632 if (search_start >= extent_end) {
1633 ret = 0;
1634 goto out;
1635 }
1636
1637 search_start = extent_end;
1638
1639 if (end < extent_end && end >= key.offset) {
1640 if (found_extent) {
1641 memcpy(&old, extent, sizeof(old));
1642 ret = btrfs_inc_extent_ref(trans, root,
1643 btrfs_file_extent_disk_blocknr(&old),
1644 btrfs_file_extent_disk_num_blocks(&old));
1645 BUG_ON(ret);
1646 }
1647 WARN_ON(found_inline);
1648 bookend = 1;
1649 }
1650
1651 if (start > key.offset) {
1652 u64 new_num;
1653 u64 old_num;
1654 /* truncate existing extent */
1655 keep = 1;
1656 WARN_ON(start & (root->blocksize - 1));
1657 if (found_extent) {
1658 new_num = (start - key.offset) >>
1659 inode->i_blkbits;
1660 old_num = btrfs_file_extent_num_blocks(extent);
1661 inode->i_blocks -= (old_num - new_num) << 3;
1662 btrfs_set_file_extent_num_blocks(extent,
1663 new_num);
1664 mark_buffer_dirty(path->nodes[0]);
1665 } else {
1666 WARN_ON(1);
1667 /*
1668 ret = btrfs_truncate_item(trans, root, path,
1669 start - key.offset);
1670 BUG_ON(ret);
1671 */
1672 }
1673 }
1674 if (!keep) {
1675 u64 disk_blocknr = 0;
1676 u64 disk_num_blocks = 0;
1677 u64 extent_num_blocks = 0;
1678 if (found_extent) {
1679 disk_blocknr =
1680 btrfs_file_extent_disk_blocknr(extent);
1681 disk_num_blocks =
1682 btrfs_file_extent_disk_num_blocks(extent);
1683 extent_num_blocks =
1684 btrfs_file_extent_num_blocks(extent);
1685 }
1686 ret = btrfs_del_item(trans, root, path);
1687 BUG_ON(ret);
1688 btrfs_release_path(root, path);
1689 if (found_extent) {
1690 inode->i_blocks -=
1691 btrfs_file_extent_num_blocks(extent) << 3;
1692 ret = btrfs_free_extent(trans, root,
1693 disk_blocknr,
1694 disk_num_blocks, 0);
1695 }
1696
1697 BUG_ON(ret);
1698 if (!bookend && search_start >= end) {
1699 ret = 0;
1700 goto out;
1701 }
1702 if (!bookend)
1703 continue;
1704 }
1705 if (bookend && found_extent) {
1706 /* create bookend */
1707 struct btrfs_key ins;
1708 ins.objectid = inode->i_ino;
1709 ins.offset = end;
1710 ins.flags = 0;
1711 btrfs_set_key_type(&ins, BTRFS_EXTENT_DATA_KEY);
1712
1713 btrfs_release_path(root, path);
1714 ret = btrfs_insert_empty_item(trans, root, path, &ins,
1715 sizeof(*extent));
1716 BUG_ON(ret);
1717 extent = btrfs_item_ptr(
1718 btrfs_buffer_leaf(path->nodes[0]),
1719 path->slots[0],
1720 struct btrfs_file_extent_item);
1721 btrfs_set_file_extent_disk_blocknr(extent,
1722 btrfs_file_extent_disk_blocknr(&old));
1723 btrfs_set_file_extent_disk_num_blocks(extent,
1724 btrfs_file_extent_disk_num_blocks(&old));
1725
1726 btrfs_set_file_extent_offset(extent,
1727 btrfs_file_extent_offset(&old) +
1728 ((end - key.offset) >> inode->i_blkbits));
1729 WARN_ON(btrfs_file_extent_num_blocks(&old) <
1730 (end - key.offset) >> inode->i_blkbits);
1731 btrfs_set_file_extent_num_blocks(extent,
1732 btrfs_file_extent_num_blocks(&old) -
1733 ((end - key.offset) >> inode->i_blkbits));
1734
1735 btrfs_set_file_extent_type(extent,
1736 BTRFS_FILE_EXTENT_REG);
1737 btrfs_set_file_extent_generation(extent,
1738 btrfs_file_extent_generation(&old));
1739 btrfs_mark_buffer_dirty(path->nodes[0]);
1740 inode->i_blocks +=
1741 btrfs_file_extent_num_blocks(extent) << 3;
1742 ret = 0;
1743 goto out;
1744 }
1745 }
1746 out:
1747 btrfs_free_path(path);
1748 return ret;
1749 }
1750
1751 static int prepare_pages(struct btrfs_root *root,
1752 struct file *file,
1753 struct page **pages,
1754 size_t num_pages,
1755 loff_t pos,
1756 unsigned long first_index,
1757 unsigned long last_index,
1758 size_t write_bytes,
1759 u64 alloc_extent_start)
1760 {
1761 int i;
1762 unsigned long index = pos >> PAGE_CACHE_SHIFT;
1763 struct inode *inode = file->f_path.dentry->d_inode;
1764 int offset;
1765 int err = 0;
1766 int this_write;
1767 struct buffer_head *bh;
1768 struct buffer_head *head;
1769 loff_t isize = i_size_read(inode);
1770
1771 memset(pages, 0, num_pages * sizeof(struct page *));
1772
1773 for (i = 0; i < num_pages; i++) {
1774 pages[i] = grab_cache_page(inode->i_mapping, index + i);
1775 if (!pages[i]) {
1776 err = -ENOMEM;
1777 goto failed_release;
1778 }
1779 cancel_dirty_page(pages[i], PAGE_CACHE_SIZE);
1780 wait_on_page_writeback(pages[i]);
1781 offset = pos & (PAGE_CACHE_SIZE -1);
1782 this_write = min(PAGE_CACHE_SIZE - offset, write_bytes);
1783 if (!page_has_buffers(pages[i])) {
1784 create_empty_buffers(pages[i],
1785 root->fs_info->sb->s_blocksize,
1786 (1 << BH_Uptodate));
1787 }
1788 head = page_buffers(pages[i]);
1789 bh = head;
1790 do {
1791 err = btrfs_map_bh_to_logical(root, bh,
1792 alloc_extent_start);
1793 BUG_ON(err);
1794 if (err)
1795 goto failed_truncate;
1796 bh = bh->b_this_page;
1797 if (alloc_extent_start)
1798 alloc_extent_start++;
1799 } while (bh != head);
1800 pos += this_write;
1801 WARN_ON(this_write > write_bytes);
1802 write_bytes -= this_write;
1803 }
1804 return 0;
1805
1806 failed_release:
1807 btrfs_drop_pages(pages, num_pages);
1808 return err;
1809
1810 failed_truncate:
1811 btrfs_drop_pages(pages, num_pages);
1812 if (pos > isize)
1813 vmtruncate(inode, isize);
1814 return err;
1815 }
1816
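/*
 * The write path: pin any partially overwritten pages at either end of
 * the range, drop the old extents the write covers, allocate one new
 * extent (or leave room for an inline extent on small files), then copy
 * the user data in page-sized chunks and dirty the pages.
 */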
1817 static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
1818 size_t count, loff_t *ppos)
1819 {
1820 loff_t pos;
1821 size_t num_written = 0;
1822 int err = 0;
1823 int ret = 0;
1824 struct inode *inode = file->f_path.dentry->d_inode;
1825 struct btrfs_root *root = BTRFS_I(inode)->root;
1826 struct page *pages[8];
1827 struct page *pinned[2];
1828 unsigned long first_index;
1829 unsigned long last_index;
1830 u64 start_pos;
1831 u64 num_blocks;
1832 u64 alloc_extent_start;
1833 struct btrfs_trans_handle *trans;
1834 struct btrfs_key ins;
1835
1836 pinned[0] = NULL;
1837 pinned[1] = NULL;
1838 if (file->f_flags & O_DIRECT)
1839 return -EINVAL;
1840 pos = *ppos;
1841 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
1842 current->backing_dev_info = inode->i_mapping->backing_dev_info;
1843 err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
1844 if (err)
1845 goto out;
1846 if (count == 0)
1847 goto out;
1848 err = remove_suid(file->f_path.dentry);
1849 if (err)
1850 goto out;
1851 file_update_time(file);
1852
1853 start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
1854 num_blocks = (count + pos - start_pos + root->blocksize - 1) >>
1855 inode->i_blkbits;
1856
1857 mutex_lock(&inode->i_mutex);
1858 first_index = pos >> PAGE_CACHE_SHIFT;
1859 last_index = (pos + count) >> PAGE_CACHE_SHIFT;
1860
1861 if ((first_index << PAGE_CACHE_SHIFT) < inode->i_size &&
1862 (pos & (PAGE_CACHE_SIZE - 1))) {
1863 pinned[0] = grab_cache_page(inode->i_mapping, first_index);
1864 if (!PageUptodate(pinned[0])) {
1865 ret = mpage_readpage(pinned[0], btrfs_get_block);
1866 BUG_ON(ret);
1867 wait_on_page_locked(pinned[0]);
1868 } else {
1869 unlock_page(pinned[0]);
1870 }
1871 }
1872 if (first_index != last_index &&
1873 (last_index << PAGE_CACHE_SHIFT) < inode->i_size &&
1874 (count & (PAGE_CACHE_SIZE - 1))) {
1875 pinned[1] = grab_cache_page(inode->i_mapping, last_index);
1876 if (!PageUptodate(pinned[1])) {
1877 ret = mpage_readpage(pinned[1], btrfs_get_block);
1878 BUG_ON(ret);
1879 wait_on_page_locked(pinned[1]);
1880 } else {
1881 unlock_page(pinned[1]);
1882 }
1883 }
1884
1885 mutex_lock(&root->fs_info->fs_mutex);
1886 trans = btrfs_start_transaction(root, 1);
1887 if (!trans) {
1888 err = -ENOMEM;
1889 mutex_unlock(&root->fs_info->fs_mutex);
1890 goto out_unlock;
1891 }
1892 btrfs_set_trans_block_group(trans, inode);
1893 /* FIXME blocksize != 4096 */
1894 inode->i_blocks += num_blocks << 3;
1895 if (start_pos < inode->i_size) {
1896 /* FIXME blocksize != pagesize */
1897 ret = drop_extents(trans, root, inode,
1898 start_pos,
1899 (pos + count + root->blocksize -1) &
1900 ~((u64)root->blocksize - 1));
1901 BUG_ON(ret);
1902 }
1903 if (inode->i_size >= PAGE_CACHE_SIZE || pos + count < inode->i_size ||
1904 pos + count - start_pos > BTRFS_MAX_INLINE_DATA_SIZE(root)) {
1905 ret = btrfs_alloc_extent(trans, root, inode->i_ino,
1906 num_blocks, 1, (u64)-1, &ins, 1);
1907 BUG_ON(ret);
1908 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
1909 start_pos, ins.objectid, ins.offset);
1910 BUG_ON(ret);
1911 } else {
1912 ins.offset = 0;
1913 ins.objectid = 0;
1914 }
1915 BUG_ON(ret);
1916 alloc_extent_start = ins.objectid;
1917 btrfs_update_inode_block_group(trans, inode);
1918 ret = btrfs_end_transaction(trans, root);
1919 mutex_unlock(&root->fs_info->fs_mutex);
1920
1921 while(count > 0) {
1922 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
1923 size_t write_bytes = min(count, PAGE_CACHE_SIZE - offset);
1924 size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
1925 PAGE_CACHE_SHIFT;
1926
1927 memset(pages, 0, sizeof(pages));
1928 ret = prepare_pages(root, file, pages, num_pages,
1929 pos, first_index, last_index,
1930 write_bytes, alloc_extent_start);
1931 BUG_ON(ret);
1932
1933 /* FIXME blocks != pagesize */
1934 if (alloc_extent_start)
1935 alloc_extent_start += num_pages;
1936 ret = btrfs_copy_from_user(pos, num_pages,
1937 write_bytes, pages, buf);
1938 BUG_ON(ret);
1939
1940 ret = dirty_and_release_pages(NULL, root, file, pages,
1941 num_pages, pos, write_bytes);
1942 BUG_ON(ret);
1943 btrfs_drop_pages(pages, num_pages);
1944
1945 buf += write_bytes;
1946 count -= write_bytes;
1947 pos += write_bytes;
1948 num_written += write_bytes;
1949
1950 balance_dirty_pages_ratelimited(inode->i_mapping);
1951 btrfs_btree_balance_dirty(root);
1952 cond_resched();
1953 }
1954 out_unlock:
1955 mutex_unlock(&inode->i_mutex);
1956 out:
1957 if (pinned[0])
1958 page_cache_release(pinned[0]);
1959 if (pinned[1])
1960 page_cache_release(pinned[1]);
1961 *ppos = pos;
1962 current->backing_dev_info = NULL;
1963 mark_inode_dirty(inode);
1964 return num_written ? num_written : err;
1965 }
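
The i_blocks update above (marked FIXME) hard-codes a 4096-byte block size: shifting the block count left by 3 converts 4 KiB blocks into the 512-byte sectors that i_blocks counts. A minimal sketch of a blocksize-independent version, shown only to illustrate what the FIXME points at (the helper name is hypothetical):

/*
 * Hypothetical sketch (not in the original file) of a blocksize-independent
 * i_blocks update: derive the sectors-per-block shift from i_blkbits instead
 * of hard-coding it.  With 4096-byte blocks (i_blkbits == 12) this reduces
 * to the "num_blocks << 3" used above.
 */
static inline void btrfs_add_i_blocks(struct inode *inode, u64 num_blocks)
{
	inode->i_blocks += num_blocks << (inode->i_blkbits - 9);
}
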
1966
1967 static int btrfs_read_actor(read_descriptor_t *desc, struct page *page,
1968 unsigned long offset, unsigned long size)
1969 {
1970 char *kaddr;
1971 unsigned long left, count = desc->count;
1972 struct inode *inode = page->mapping->host;
1973
1974 if (size > count)
1975 size = count;
1976
1977 if (!PageChecked(page)) {
1978 /* FIXME, do it per block */
1979 struct btrfs_root *root = BTRFS_I(inode)->root;
1980
1981 int ret = btrfs_csum_verify_file_block(root,
1982 page->mapping->host->i_ino,
1983 					(u64)page->index << PAGE_CACHE_SHIFT,
1984 kmap(page), PAGE_CACHE_SIZE);
1985 if (ret) {
1986 printk("failed to verify ino %lu page %lu\n",
1987 page->mapping->host->i_ino,
1988 page->index);
1989 memset(page_address(page), 0, PAGE_CACHE_SIZE);
1990 }
1991 SetPageChecked(page);
1992 kunmap(page);
1993 }
1994 /*
1995 * Faults on the destination of a read are common, so do it before
1996 * taking the kmap.
1997 */
1998 if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1999 kaddr = kmap_atomic(page, KM_USER0);
2000 left = __copy_to_user_inatomic(desc->arg.buf,
2001 kaddr + offset, size);
2002 kunmap_atomic(kaddr, KM_USER0);
2003 if (left == 0)
2004 goto success;
2005 }
2006
2007 /* Do it the slow way */
2008 kaddr = kmap(page);
2009 left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
2010 kunmap(page);
2011
2012 if (left) {
2013 size -= left;
2014 desc->error = -EFAULT;
2015 }
2016 success:
2017 desc->count = count - size;
2018 desc->written += size;
2019 desc->arg.buf += size;
2020 return size;
2021 }
2022
2023 /**
2024 * btrfs_file_aio_read - filesystem read routine
2025 * @iocb: kernel I/O control block
2026 * @iov: io vector request
2027 * @nr_segs: number of segments in the iovec
2028 * @pos: current file position
2029 */
2030 static ssize_t btrfs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
2031 unsigned long nr_segs, loff_t pos)
2032 {
2033 struct file *filp = iocb->ki_filp;
2034 ssize_t retval;
2035 unsigned long seg;
2036 size_t count;
2037 loff_t *ppos = &iocb->ki_pos;
2038
2039 count = 0;
2040 for (seg = 0; seg < nr_segs; seg++) {
2041 const struct iovec *iv = &iov[seg];
2042
2043 /*
2044 * If any segment has a negative length, or the cumulative
2045 * length ever wraps negative then return -EINVAL.
2046 */
2047 count += iv->iov_len;
2048 if (unlikely((ssize_t)(count|iv->iov_len) < 0))
2049 return -EINVAL;
2050 if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
2051 continue;
2052 if (seg == 0)
2053 return -EFAULT;
2054 nr_segs = seg;
2055 count -= iv->iov_len; /* This segment is no good */
2056 break;
2057 }
2058 retval = 0;
2059 if (count) {
2060 for (seg = 0; seg < nr_segs; seg++) {
2061 read_descriptor_t desc;
2062
2063 desc.written = 0;
2064 desc.arg.buf = iov[seg].iov_base;
2065 desc.count = iov[seg].iov_len;
2066 if (desc.count == 0)
2067 continue;
2068 desc.error = 0;
2069 do_generic_file_read(filp, ppos, &desc,
2070 btrfs_read_actor);
2071 retval += desc.written;
2072 if (desc.error) {
2073 retval = retval ?: desc.error;
2074 break;
2075 }
2076 }
2077 }
2078 return retval;
2079 }
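
The segment loop above mirrors the generic read path: it sums the iovec lengths, rejects negative or wrapping totals, truncates the vector at the first unwritable segment, and then hands each remaining segment to do_generic_file_read() as its own read descriptor. A hypothetical userspace sketch that exercises the multi-segment path (the mount path is an assumption):

/* Hypothetical userspace sketch: read one page into two iovec segments. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char a[2048], b[2048];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	int fd = open("/mnt/btrfs/file", O_RDONLY);	/* path is an assumption */

	if (fd < 0)
		return 1;
	/* Each segment is handed to btrfs_read_actor as its own descriptor. */
	printf("read %zd bytes\n", readv(fd, iov, 2));
	close(fd);
	return 0;
}
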
2080
2081 static int create_subvol(struct btrfs_root *root, char *name, int namelen)
2082 {
2083 struct btrfs_trans_handle *trans;
2084 struct btrfs_key key;
2085 struct btrfs_root_item root_item;
2086 struct btrfs_inode_item *inode_item;
2087 struct buffer_head *subvol;
2088 struct btrfs_leaf *leaf;
2089 struct btrfs_root *new_root;
2090 struct inode *inode;
2091 struct inode *dir;
2092 int ret;
2093 u64 objectid;
2094 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
2095
2096 mutex_lock(&root->fs_info->fs_mutex);
2097 trans = btrfs_start_transaction(root, 1);
2098 BUG_ON(!trans);
2099
2100 subvol = btrfs_alloc_free_block(trans, root, 0);
2101 if (subvol == NULL)
2102 return -ENOSPC;
2103 leaf = btrfs_buffer_leaf(subvol);
2104 btrfs_set_header_nritems(&leaf->header, 0);
2105 btrfs_set_header_level(&leaf->header, 0);
2106 btrfs_set_header_blocknr(&leaf->header, bh_blocknr(subvol));
2107 btrfs_set_header_generation(&leaf->header, trans->transid);
2108 btrfs_set_header_owner(&leaf->header, root->root_key.objectid);
2109 memcpy(leaf->header.fsid, root->fs_info->disk_super->fsid,
2110 sizeof(leaf->header.fsid));
2111 mark_buffer_dirty(subvol);
2112
2113 inode_item = &root_item.inode;
2114 memset(inode_item, 0, sizeof(*inode_item));
2115 btrfs_set_inode_generation(inode_item, 1);
2116 btrfs_set_inode_size(inode_item, 3);
2117 btrfs_set_inode_nlink(inode_item, 1);
2118 btrfs_set_inode_nblocks(inode_item, 1);
2119 btrfs_set_inode_mode(inode_item, S_IFDIR | 0755);
2120
2121 btrfs_set_root_blocknr(&root_item, bh_blocknr(subvol));
2122 btrfs_set_root_refs(&root_item, 1);
2123 brelse(subvol);
2124 subvol = NULL;
2125
2126 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2127 0, &objectid);
2128 BUG_ON(ret);
2129
2130 btrfs_set_root_dirid(&root_item, new_dirid);
2131
2132 key.objectid = objectid;
2133 key.offset = 1;
2134 key.flags = 0;
2135 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2136 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2137 &root_item);
2138 BUG_ON(ret);
2139
2140 /*
2141 * insert the directory item
2142 */
2143 key.offset = (u64)-1;
2144 dir = root->fs_info->sb->s_root->d_inode;
2145 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2146 name, namelen, dir->i_ino, &key, 0);
2147 BUG_ON(ret);
2148
2149 ret = btrfs_commit_transaction(trans, root);
2150 BUG_ON(ret);
2151
2152 new_root = btrfs_read_fs_root(root->fs_info, &key);
2153 BUG_ON(!new_root);
2154
2155 trans = btrfs_start_transaction(new_root, 1);
2156 BUG_ON(!trans);
2157
2158 inode = btrfs_new_inode(trans, new_root, new_dirid,
2159 BTRFS_I(dir)->block_group, S_IFDIR | 0700);
2160 inode->i_op = &btrfs_dir_inode_operations;
2161 inode->i_fop = &btrfs_dir_file_operations;
2162
2163 ret = btrfs_make_empty_dir(trans, new_root, new_dirid, new_dirid);
2164 BUG_ON(ret);
2165
2166 inode->i_nlink = 1;
2167 inode->i_size = 6;
2168 ret = btrfs_update_inode(trans, new_root, inode);
2169 BUG_ON(ret);
2170
2171 ret = btrfs_commit_transaction(trans, new_root);
2172 BUG_ON(ret);
2173
2174 iput(inode);
2175
2176 mutex_unlock(&root->fs_info->fs_mutex);
2177 btrfs_btree_balance_dirty(root);
2178 return 0;
2179 }
2180
2181 static int create_snapshot(struct btrfs_root *root, char *name, int namelen)
2182 {
2183 struct btrfs_trans_handle *trans;
2184 struct btrfs_key key;
2185 struct btrfs_root_item new_root_item;
2186 int ret;
2187 u64 objectid;
2188
2189 if (!root->ref_cows)
2190 return -EINVAL;
2191
2192 mutex_lock(&root->fs_info->fs_mutex);
2193 trans = btrfs_start_transaction(root, 1);
2194 BUG_ON(!trans);
2195
2196 ret = btrfs_update_inode(trans, root, root->inode);
2197 BUG_ON(ret);
2198
2199 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2200 0, &objectid);
2201 BUG_ON(ret);
2202
2203 memcpy(&new_root_item, &root->root_item,
2204 sizeof(new_root_item));
2205
2206 key.objectid = objectid;
2207 key.offset = 1;
2208 key.flags = 0;
2209 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2210 btrfs_set_root_blocknr(&new_root_item, bh_blocknr(root->node));
2211
2212 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2213 &new_root_item);
2214 BUG_ON(ret);
2215
2216 /*
2217 * insert the directory item
2218 */
2219 key.offset = (u64)-1;
2220 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2221 name, namelen,
2222 root->fs_info->sb->s_root->d_inode->i_ino,
2223 &key, 0);
2224
2225 BUG_ON(ret);
2226
2227 ret = btrfs_inc_root_ref(trans, root);
2228 BUG_ON(ret);
2229
2230 ret = btrfs_commit_transaction(trans, root);
2231 BUG_ON(ret);
2232 mutex_unlock(&root->fs_info->fs_mutex);
2233 btrfs_btree_balance_dirty(root);
2234 return 0;
2235 }
2236
2237 static int add_disk(struct btrfs_root *root, char *name, int namelen)
2238 {
2239 struct block_device *bdev;
2240 struct btrfs_path *path;
2241 struct super_block *sb = root->fs_info->sb;
2242 struct btrfs_root *dev_root = root->fs_info->dev_root;
2243 struct btrfs_trans_handle *trans;
2244 struct btrfs_device_item *dev_item;
2245 struct btrfs_key key;
2246 u16 item_size;
2247 u64 num_blocks;
2248 u64 new_blocks;
2249 u64 device_id;
2250 int ret;
2251
2252 printk("adding disk %s\n", name);
2253 path = btrfs_alloc_path();
2254 if (!path)
2255 return -ENOMEM;
2256 num_blocks = btrfs_super_total_blocks(root->fs_info->disk_super);
2257 bdev = open_bdev_excl(name, O_RDWR, sb);
2258 if (IS_ERR(bdev)) {
2259 ret = PTR_ERR(bdev);
2260 printk("open bdev excl failed ret %d\n", ret);
2261 goto out_nolock;
2262 }
2263 set_blocksize(bdev, sb->s_blocksize);
2264 new_blocks = bdev->bd_inode->i_size >> sb->s_blocksize_bits;
2265 key.objectid = num_blocks;
2266 key.offset = new_blocks;
2267 key.flags = 0;
2268 btrfs_set_key_type(&key, BTRFS_DEV_ITEM_KEY);
2269
2270 mutex_lock(&dev_root->fs_info->fs_mutex);
2271 trans = btrfs_start_transaction(dev_root, 1);
2272 item_size = sizeof(*dev_item) + namelen;
2273 printk("insert empty on %Lu %Lu %u size %d\n", num_blocks, new_blocks, key.flags, item_size);
2274 ret = btrfs_insert_empty_item(trans, dev_root, path, &key, item_size);
2275 if (ret) {
2276 printk("insert failed %d\n", ret);
2277 close_bdev_excl(bdev);
2278 if (ret > 0)
2279 ret = -EEXIST;
2280 goto out;
2281 }
2282 dev_item = btrfs_item_ptr(btrfs_buffer_leaf(path->nodes[0]),
2283 path->slots[0], struct btrfs_device_item);
2284 btrfs_set_device_pathlen(dev_item, namelen);
2285 memcpy(dev_item + 1, name, namelen);
2286
2287 device_id = btrfs_super_last_device_id(root->fs_info->disk_super) + 1;
2288 btrfs_set_super_last_device_id(root->fs_info->disk_super, device_id);
2289 btrfs_set_device_id(dev_item, device_id);
2290 mark_buffer_dirty(path->nodes[0]);
2291
2292 ret = btrfs_insert_dev_radix(root, bdev, device_id, num_blocks,
2293 new_blocks);
2294
2295 if (!ret) {
2296 btrfs_set_super_total_blocks(root->fs_info->disk_super,
2297 num_blocks + new_blocks);
2298 i_size_write(root->fs_info->btree_inode,
2299 (num_blocks + new_blocks) <<
2300 root->fs_info->btree_inode->i_blkbits);
2301 }
2302
2303 out:
2304 ret = btrfs_commit_transaction(trans, dev_root);
2305 BUG_ON(ret);
2306 mutex_unlock(&root->fs_info->fs_mutex);
2307 out_nolock:
2308 btrfs_free_path(path);
2309 btrfs_btree_balance_dirty(root);
2310
2311 return ret;
2312 }
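
add_disk() appends the new device's blocks after the current total: the device contributes its byte size shifted down by the superblock blocksize bits, and both the superblock total and the btree inode's i_size are extended to cover the new range. A minimal sketch of that arithmetic, with a hypothetical helper name and a worked figure assuming 4 KiB blocks:

/*
 * Hypothetical sketch (not part of the original file) of the size math
 * add_disk() performs: the new device contributes its byte size shifted
 * down by the superblock blocksize bits.  With 4 KiB blocks
 * (s_blocksize_bits == 12), a 1 GiB device adds 262144 blocks.
 */
static u64 btrfs_total_blocks_after_add(u64 cur_total_blocks,
					u64 device_bytes,
					unsigned int blocksize_bits)
{
	return cur_total_blocks + (device_bytes >> blocksize_bits);
}
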
2313
2314 static int btrfs_ioctl(struct inode *inode, struct file *filp, unsigned int
2315 cmd, unsigned long arg)
2316 {
2317 struct btrfs_root *root = BTRFS_I(inode)->root;
2318 struct btrfs_ioctl_vol_args vol_args;
2319 int ret = 0;
2320 struct btrfs_dir_item *di;
2321 int namelen;
2322 struct btrfs_path *path;
2323 u64 root_dirid;
2324
2325 switch (cmd) {
2326 case BTRFS_IOC_SNAP_CREATE:
2327 if (copy_from_user(&vol_args,
2328 (struct btrfs_ioctl_vol_args __user *)arg,
2329 sizeof(vol_args)))
2330 return -EFAULT;
2331 namelen = strlen(vol_args.name);
2332 if (namelen > BTRFS_VOL_NAME_MAX)
2333 return -EINVAL;
2334 path = btrfs_alloc_path();
2335 if (!path)
2336 return -ENOMEM;
2337 		root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
2338 mutex_lock(&root->fs_info->fs_mutex);
2339 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
2340 path, root_dirid,
2341 vol_args.name, namelen, 0);
2342 mutex_unlock(&root->fs_info->fs_mutex);
2343 btrfs_free_path(path);
2344 if (di && !IS_ERR(di))
2345 return -EEXIST;
2346
2347 if (root == root->fs_info->tree_root)
2348 ret = create_subvol(root, vol_args.name, namelen);
2349 else
2350 ret = create_snapshot(root, vol_args.name, namelen);
2351 WARN_ON(ret);
2352 break;
2353 case BTRFS_IOC_ADD_DISK:
2354 if (copy_from_user(&vol_args,
2355 (struct btrfs_ioctl_vol_args __user *)arg,
2356 sizeof(vol_args)))
2357 return -EFAULT;
2358 namelen = strlen(vol_args.name);
2359 if (namelen > BTRFS_VOL_NAME_MAX)
2360 return -EINVAL;
2361 vol_args.name[namelen] = '\0';
2362 ret = add_disk(root, vol_args.name, namelen);
2363 break;
2364 default:
2365 return -ENOTTY;
2366 }
2367 return ret;
2368 }
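
Both ioctls take a btrfs_ioctl_vol_args that carries only a name (a volume name for BTRFS_IOC_SNAP_CREATE, a device path for BTRFS_IOC_ADD_DISK) and can be issued on a directory opened inside the filesystem. A hypothetical userspace sketch, assuming the ioctl number and the args struct layout come from a userspace copy of ioctl.h:

/*
 * Hypothetical userspace sketch, assuming BTRFS_IOC_SNAP_CREATE and the
 * btrfs_ioctl_vol_args layout are exported by a userspace copy of ioctl.h.
 * BTRFS_IOC_ADD_DISK takes the same argument struct with a device path.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ioctl.h"

int snap_create(const char *mnt, const char *name)
{
	struct btrfs_ioctl_vol_args args;
	int fd = open(mnt, O_RDONLY);
	int ret;

	if (fd < 0)
		return -1;
	memset(&args, 0, sizeof(args));
	strncpy(args.name, name, sizeof(args.name) - 1);
	/* On the tree root this creates a subvolume, elsewhere a snapshot. */
	ret = ioctl(fd, BTRFS_IOC_SNAP_CREATE, &args);
	close(fd);
	return ret;
}
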
2369
2370 static struct kmem_cache *btrfs_inode_cachep;
2371 struct kmem_cache *btrfs_trans_handle_cachep;
2372 struct kmem_cache *btrfs_transaction_cachep;
2373 struct kmem_cache *btrfs_bit_radix_cachep;
2374 struct kmem_cache *btrfs_path_cachep;
2375
2376 /*
2377 * Called inside transaction, so use GFP_NOFS
2378 */
2379 static struct inode *btrfs_alloc_inode(struct super_block *sb)
2380 {
2381 struct btrfs_inode *ei;
2382
2383 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
2384 if (!ei)
2385 return NULL;
2386 return &ei->vfs_inode;
2387 }
2388
2389 static void btrfs_destroy_inode(struct inode *inode)
2390 {
2391 WARN_ON(!list_empty(&inode->i_dentry));
2392 WARN_ON(inode->i_data.nrpages);
2393
2394 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
2395 }
2396
2397 static void init_once(void * foo, struct kmem_cache * cachep,
2398 unsigned long flags)
2399 {
2400 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
2401
2402 if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
2403 SLAB_CTOR_CONSTRUCTOR) {
2404 inode_init_once(&ei->vfs_inode);
2405 }
2406 }
2407
2408 static int init_inodecache(void)
2409 {
2410 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
2411 sizeof(struct btrfs_inode),
2412 0, (SLAB_RECLAIM_ACCOUNT|
2413 SLAB_MEM_SPREAD),
2414 init_once, NULL);
2415 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
2416 sizeof(struct btrfs_trans_handle),
2417 0, (SLAB_RECLAIM_ACCOUNT|
2418 SLAB_MEM_SPREAD),
2419 NULL, NULL);
2420 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
2421 sizeof(struct btrfs_transaction),
2422 0, (SLAB_RECLAIM_ACCOUNT|
2423 SLAB_MEM_SPREAD),
2424 NULL, NULL);
2425 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
2426 					sizeof(struct btrfs_path),
2427 0, (SLAB_RECLAIM_ACCOUNT|
2428 SLAB_MEM_SPREAD),
2429 NULL, NULL);
2430 btrfs_bit_radix_cachep = kmem_cache_create("btrfs_radix",
2431 256,
2432 0, (SLAB_RECLAIM_ACCOUNT|
2433 SLAB_MEM_SPREAD |
2434 SLAB_DESTROY_BY_RCU),
2435 NULL, NULL);
2436 	if (btrfs_inode_cachep == NULL || btrfs_trans_handle_cachep == NULL ||
2437 	    btrfs_transaction_cachep == NULL || btrfs_bit_radix_cachep == NULL ||
	    btrfs_path_cachep == NULL)
2438 return -ENOMEM;
2439 return 0;
2440 }
2441
2442 static void destroy_inodecache(void)
2443 {
2444 kmem_cache_destroy(btrfs_inode_cachep);
2445 kmem_cache_destroy(btrfs_trans_handle_cachep);
2446 kmem_cache_destroy(btrfs_transaction_cachep);
2447 kmem_cache_destroy(btrfs_bit_radix_cachep);
2448 kmem_cache_destroy(btrfs_path_cachep);
2449 }
2450
2451 static int btrfs_get_sb(struct file_system_type *fs_type,
2452 int flags, const char *dev_name, void *data, struct vfsmount *mnt)
2453 {
2454 return get_sb_bdev(fs_type, flags, dev_name, data,
2455 btrfs_fill_super, mnt);
2456 }
2457
2458
2459 static int btrfs_getattr(struct vfsmount *mnt,
2460 struct dentry *dentry, struct kstat *stat)
2461 {
2462 struct inode *inode = dentry->d_inode;
2463 generic_fillattr(inode, stat);
2464 stat->blksize = 256 * 1024;
2465 return 0;
2466 }
2467
2468 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
2469 {
2470 struct btrfs_root *root = btrfs_sb(dentry->d_sb);
2471 struct btrfs_super_block *disk_super = root->fs_info->disk_super;
2472
2473 buf->f_namelen = BTRFS_NAME_LEN;
2474 buf->f_blocks = btrfs_super_total_blocks(disk_super);
2475 buf->f_bfree = buf->f_blocks - btrfs_super_blocks_used(disk_super);
2476 buf->f_bavail = buf->f_bfree;
2477 buf->f_bsize = dentry->d_sb->s_blocksize;
2478 buf->f_type = BTRFS_SUPER_MAGIC;
2479 return 0;
2480 }
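
btrfs_statfs() reports totals straight from the cached superblock: f_blocks is the device total, f_bfree and f_bavail are total minus blocks used, and f_type carries the filesystem magic. A hypothetical userspace sketch that checks the magic and prints the counts (the mount point is an assumption):

/* Hypothetical userspace sketch: confirm a mount is btrfs and print counts. */
#include <stdio.h>
#include <sys/vfs.h>

#define BTRFS_SUPER_MAGIC 0x9123682E	/* mirrors the definition in this file */

int main(void)
{
	struct statfs st;

	if (statfs("/mnt/btrfs", &st) != 0)	/* mount point is an assumption */
		return 1;
	if (st.f_type == BTRFS_SUPER_MAGIC)
		printf("btrfs: %llu total blocks, %llu free\n",
		       (unsigned long long)st.f_blocks,
		       (unsigned long long)st.f_bfree);
	return 0;
}
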
2481
2482 static struct file_system_type btrfs_fs_type = {
2483 .owner = THIS_MODULE,
2484 .name = "btrfs",
2485 .get_sb = btrfs_get_sb,
2486 .kill_sb = kill_block_super,
2487 .fs_flags = FS_REQUIRES_DEV,
2488 };
2489
2490 static struct super_operations btrfs_super_ops = {
2491 .delete_inode = btrfs_delete_inode,
2492 .put_super = btrfs_put_super,
2493 .read_inode = btrfs_read_locked_inode,
2494 .write_super = btrfs_write_super,
2495 .sync_fs = btrfs_sync_fs,
2496 .write_inode = btrfs_write_inode,
2497 .dirty_inode = btrfs_dirty_inode,
2498 .alloc_inode = btrfs_alloc_inode,
2499 .destroy_inode = btrfs_destroy_inode,
2500 .statfs = btrfs_statfs,
2501 };
2502
2503 static struct inode_operations btrfs_dir_inode_operations = {
2504 .lookup = btrfs_lookup,
2505 .create = btrfs_create,
2506 .unlink = btrfs_unlink,
2507 .mkdir = btrfs_mkdir,
2508 .rmdir = btrfs_rmdir,
2509 };
2510
2511 static struct inode_operations btrfs_dir_ro_inode_operations = {
2512 .lookup = btrfs_lookup,
2513 };
2514
2515 static struct file_operations btrfs_dir_file_operations = {
2516 .llseek = generic_file_llseek,
2517 .read = generic_read_dir,
2518 .readdir = btrfs_readdir,
2519 .ioctl = btrfs_ioctl,
2520 };
2521
2522 static struct address_space_operations btrfs_aops = {
2523 .readpage = btrfs_readpage,
2524 .writepage = btrfs_writepage,
2525 .sync_page = block_sync_page,
2526 .prepare_write = btrfs_prepare_write,
2527 .commit_write = btrfs_commit_write,
2528 };
2529
2530 static struct inode_operations btrfs_file_inode_operations = {
2531 .truncate = btrfs_truncate,
2532 .getattr = btrfs_getattr,
2533 };
2534
2535 static struct file_operations btrfs_file_operations = {
2536 .llseek = generic_file_llseek,
2537 .read = do_sync_read,
2538 .aio_read = btrfs_file_aio_read,
2539 .write = btrfs_file_write,
2540 .mmap = generic_file_mmap,
2541 .open = generic_file_open,
2542 .ioctl = btrfs_ioctl,
2543 .fsync = btrfs_sync_file,
2544 };
2545
2546 static int __init init_btrfs_fs(void)
2547 {
2548 int err;
2549 printk("btrfs loaded!\n");
2550 err = init_inodecache();
2551 if (err)
2552 return err;
2553 kset_set_kset_s(&btrfs_subsys, fs_subsys);
2554 err = subsystem_register(&btrfs_subsys);
2555 if (err)
2556 goto out;
2557 return register_filesystem(&btrfs_fs_type);
2558 out:
2559 destroy_inodecache();
2560 return err;
2561 }
2562
2563 static void __exit exit_btrfs_fs(void)
2564 {
2565 destroy_inodecache();
2566 unregister_filesystem(&btrfs_fs_type);
2567 subsystem_unregister(&btrfs_subsys);
2568 printk("btrfs unloaded\n");
2569 }
2570
2571 module_init(init_btrfs_fs)
2572 module_exit(exit_btrfs_fs)
2573
2574 MODULE_LICENSE("GPL");