Btrfs: Don't wait on tree block writeback before freeing them anymore
fs/btrfs/inode.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/bio.h>
20 #include <linux/buffer_head.h>
21 #include <linux/fs.h>
22 #include <linux/pagemap.h>
23 #include <linux/highmem.h>
24 #include <linux/time.h>
25 #include <linux/init.h>
26 #include <linux/string.h>
27 #include <linux/smp_lock.h>
28 #include <linux/backing-dev.h>
29 #include <linux/mpage.h>
30 #include <linux/swap.h>
31 #include <linux/writeback.h>
32 #include <linux/statfs.h>
33 #include <linux/compat.h>
34 #include <linux/bit_spinlock.h>
35 #include <linux/version.h>
36 #include <linux/xattr.h>
37 #include "ctree.h"
38 #include "disk-io.h"
39 #include "transaction.h"
40 #include "btrfs_inode.h"
41 #include "ioctl.h"
42 #include "print-tree.h"
43 #include "volumes.h"
44
45 struct btrfs_iget_args {
46 u64 ino;
47 struct btrfs_root *root;
48 };
49
50 static struct inode_operations btrfs_dir_inode_operations;
51 static struct inode_operations btrfs_symlink_inode_operations;
52 static struct inode_operations btrfs_dir_ro_inode_operations;
53 static struct inode_operations btrfs_special_inode_operations;
54 static struct inode_operations btrfs_file_inode_operations;
55 static struct address_space_operations btrfs_aops;
56 static struct address_space_operations btrfs_symlink_aops;
57 static struct file_operations btrfs_dir_file_operations;
58 static struct extent_io_ops btrfs_extent_io_ops;
59
60 static struct kmem_cache *btrfs_inode_cachep;
61 struct kmem_cache *btrfs_trans_handle_cachep;
62 struct kmem_cache *btrfs_transaction_cachep;
63 struct kmem_cache *btrfs_bit_radix_cachep;
64 struct kmem_cache *btrfs_path_cachep;
65
66 #define S_SHIFT 12
67 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
68 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
69 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
70 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
71 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
72 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
73 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
74 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
75 };
76
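/*
 * very coarse ENOSPC check: refuse the operation when used + delalloc +
 * num_required would push the FS past 85% full (90% for deletes, since
 * those are likely to free space).
 */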
77 int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
78 int for_del)
79 {
80 u64 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
81 u64 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
82 u64 thresh;
83 int ret = 0;
84
85 if (for_del)
86 thresh = total * 90;
87 else
88 thresh = total * 85;
89
90 do_div(thresh, 100);
91
92 spin_lock(&root->fs_info->delalloc_lock);
93 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
94 ret = -ENOSPC;
95 spin_unlock(&root->fs_info->delalloc_lock);
96 return ret;
97 }
98
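/*
 * allocate on-disk extents to cover a delalloc range: drop any old
 * extents over [start, end], then allocate and insert new file extents
 * (capped at max_extent each) until the whole range is covered.
 */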
99 static int cow_file_range(struct inode *inode, u64 start, u64 end)
100 {
101 struct btrfs_root *root = BTRFS_I(inode)->root;
102 struct btrfs_trans_handle *trans;
103 u64 alloc_hint = 0;
104 u64 num_bytes;
105 u64 cur_alloc_size;
106 u64 blocksize = root->sectorsize;
107 u64 orig_start = start;
108 u64 orig_num_bytes;
109 struct btrfs_key ins;
110 int ret;
111
112 trans = btrfs_start_transaction(root, 1);
113 BUG_ON(!trans);
114 btrfs_set_trans_block_group(trans, inode);
115
116 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
117 num_bytes = max(blocksize, num_bytes);
118 ret = btrfs_drop_extents(trans, root, inode,
119 start, start + num_bytes, start, &alloc_hint);
120 orig_num_bytes = num_bytes;
121
122 if (alloc_hint == EXTENT_MAP_INLINE)
123 goto out;
124
125 while(num_bytes > 0) {
126 cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
127 ret = btrfs_alloc_extent(trans, root, cur_alloc_size,
128 root->sectorsize,
129 root->root_key.objectid,
130 trans->transid,
131 inode->i_ino, start, 0,
132 alloc_hint, (u64)-1, &ins, 1);
133 if (ret) {
134 WARN_ON(1);
135 goto out;
136 }
137 cur_alloc_size = ins.offset;
138 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
139 start, ins.objectid, ins.offset,
140 ins.offset);
141 inode->i_blocks += ins.offset >> 9;
142 btrfs_check_file(root, inode);
143 num_bytes -= cur_alloc_size;
144 alloc_hint = ins.objectid + ins.offset;
145 start += cur_alloc_size;
146 }
147 btrfs_drop_extent_cache(inode, orig_start,
148 orig_start + orig_num_bytes - 1);
149 btrfs_add_ordered_inode(inode);
150 btrfs_update_inode(trans, root, inode);
151 out:
152 btrfs_end_transaction(trans, root);
153 return ret;
154 }
155
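/*
 * nodatacow variant of the delalloc worker: walk the existing file
 * extents and leave them in place when they are real, unshared and
 * inside the FS limits; anything that can't be reused falls back to
 * cow_file_range().
 */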
156 static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end)
157 {
158 u64 extent_start;
159 u64 extent_end;
160 u64 bytenr;
161 u64 cow_end;
162 u64 loops = 0;
163 u64 total_fs_bytes;
164 struct btrfs_root *root = BTRFS_I(inode)->root;
165 struct extent_buffer *leaf;
166 int found_type;
167 struct btrfs_path *path;
168 struct btrfs_file_extent_item *item;
169 int ret;
170 int err;
171 struct btrfs_key found_key;
172
173 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
174 path = btrfs_alloc_path();
175 BUG_ON(!path);
176 again:
177 ret = btrfs_lookup_file_extent(NULL, root, path,
178 inode->i_ino, start, 0);
179 if (ret < 0) {
180 btrfs_free_path(path);
181 return ret;
182 }
183
184 cow_end = end;
185 if (ret != 0) {
186 if (path->slots[0] == 0)
187 goto not_found;
188 path->slots[0]--;
189 }
190
191 leaf = path->nodes[0];
192 item = btrfs_item_ptr(leaf, path->slots[0],
193 struct btrfs_file_extent_item);
194
195 /* are we inside the extent that was found? */
196 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
197 found_type = btrfs_key_type(&found_key);
198 if (found_key.objectid != inode->i_ino ||
199 found_type != BTRFS_EXTENT_DATA_KEY) {
200 goto not_found;
201 }
202
203 found_type = btrfs_file_extent_type(leaf, item);
204 extent_start = found_key.offset;
205 if (found_type == BTRFS_FILE_EXTENT_REG) {
206 u64 extent_num_bytes;
207
208 extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
209 extent_end = extent_start + extent_num_bytes;
210 err = 0;
211
212 if (loops && start != extent_start)
213 goto not_found;
214
215 if (start < extent_start || start >= extent_end)
216 goto not_found;
217
218 cow_end = min(end, extent_end - 1);
219 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
220 if (bytenr == 0)
221 goto not_found;
222
223 /*
224 * we may be called by the resizer, so make sure we're inside
225 * the limits of the FS
226 */
227 if (bytenr + extent_num_bytes > total_fs_bytes)
228 goto not_found;
229
230 if (btrfs_count_snapshots_in_path(root, path, bytenr) != 1) {
231 goto not_found;
232 }
233
234 start = extent_end;
235 } else {
236 goto not_found;
237 }
238 loop:
239 if (start > end) {
240 btrfs_free_path(path);
241 return 0;
242 }
243 btrfs_release_path(root, path);
244 loops++;
245 goto again;
246
247 not_found:
248 cow_file_range(inode, start, cow_end);
249 start = cow_end + 1;
250 goto loop;
251 }
252
253 static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
254 {
255 struct btrfs_root *root = BTRFS_I(inode)->root;
256 int ret;
257 mutex_lock(&root->fs_info->fs_mutex);
258 if (btrfs_test_opt(root, NODATACOW) ||
259 btrfs_test_flag(inode, NODATACOW))
260 ret = run_delalloc_nocow(inode, start, end);
261 else
262 ret = cow_file_range(inode, start, end);
263
264 mutex_unlock(&root->fs_info->fs_mutex);
265 return ret;
266 }
267
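/*
 * extent_io tree hooks that keep the per-inode and per-FS delalloc byte
 * counters in sync as EXTENT_DELALLOC bits are set and cleared.
 */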
268 int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
269 unsigned long old, unsigned long bits)
270 {
271 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
272 struct btrfs_root *root = BTRFS_I(inode)->root;
273 spin_lock(&root->fs_info->delalloc_lock);
274 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
275 root->fs_info->delalloc_bytes += end - start + 1;
276 spin_unlock(&root->fs_info->delalloc_lock);
277 }
278 return 0;
279 }
280
281 int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
282 unsigned long old, unsigned long bits)
283 {
284 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
285 struct btrfs_root *root = BTRFS_I(inode)->root;
286 spin_lock(&root->fs_info->delalloc_lock);
287 if (end - start + 1 > root->fs_info->delalloc_bytes) {
288 printk("warning: delalloc account %Lu %Lu\n",
289 end - start + 1, root->fs_info->delalloc_bytes);
290 root->fs_info->delalloc_bytes = 0;
291 BTRFS_I(inode)->delalloc_bytes = 0;
292 } else {
293 root->fs_info->delalloc_bytes -= end - start + 1;
294 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
295 }
296 spin_unlock(&root->fs_info->delalloc_lock);
297 }
298 return 0;
299 }
300
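/*
 * returns 1 if adding size more bytes to this bio would make it cross a
 * chunk/stripe boundary on disk, so the page must start a new bio;
 * returns 0 if the page can be merged.
 */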
301 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
302 size_t size, struct bio *bio)
303 {
304 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
305 struct btrfs_mapping_tree *map_tree;
306 u64 logical = bio->bi_sector << 9;
307 u64 length = 0;
308 u64 map_length;
309 struct bio_vec *bvec;
310 int i;
311 int ret;
312
313 bio_for_each_segment(bvec, bio, i) {
314 length += bvec->bv_len;
315 }
316 map_tree = &root->fs_info->mapping_tree;
317 map_length = length;
318 ret = btrfs_map_block(map_tree, READ, logical,
319 &map_length, NULL, 0);
320
321 if (map_length < length + size) {
322 return 1;
323 }
324 return 0;
325 }
326
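/*
 * worker for data write bios: checksum the bio contents, record the
 * checksums in the csum tree inside a transaction, then hand the bio to
 * btrfs_map_bio for submission.
 */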
327 int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
328 int mirror_num)
329 {
330 struct btrfs_root *root = BTRFS_I(inode)->root;
331 struct btrfs_trans_handle *trans;
332 int ret = 0;
333 char *sums = NULL;
334
335 ret = btrfs_csum_one_bio(root, bio, &sums);
336 BUG_ON(ret);
337
338 mutex_lock(&root->fs_info->fs_mutex);
339 trans = btrfs_start_transaction(root, 1);
340
341 btrfs_set_trans_block_group(trans, inode);
342 btrfs_csum_file_blocks(trans, root, inode, bio, sums);
343
344 ret = btrfs_end_transaction(trans, root);
345 BUG_ON(ret);
346 mutex_unlock(&root->fs_info->fs_mutex);
347
348 kfree(sums);
349
350 return btrfs_map_bio(root, rw, bio, mirror_num);
351 }
352
353 int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
354 int mirror_num)
355 {
356 struct btrfs_root *root = BTRFS_I(inode)->root;
357 int ret = 0;
358
359 if (!(rw & (1 << BIO_RW))) {
360 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
361 BUG_ON(ret);
362 goto mapit;
363 }
364
365 if (btrfs_test_opt(root, NODATASUM) ||
366 btrfs_test_flag(inode, NODATASUM)) {
367 goto mapit;
368 }
369
370 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
371 inode, rw, bio, mirror_num,
372 __btrfs_submit_bio_hook);
373 mapit:
374 return btrfs_map_bio(root, rw, bio, mirror_num);
375 }
376
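/*
 * before a data read, look up the stored checksum for this range and
 * stash it in the io_tree private field so the end_io hook can verify
 * the data; a missing checksum (preallocated region) is not an error.
 */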
377 int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
378 {
379 int ret = 0;
380 struct inode *inode = page->mapping->host;
381 struct btrfs_root *root = BTRFS_I(inode)->root;
382 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
383 struct btrfs_csum_item *item;
384 struct btrfs_path *path = NULL;
385 u32 csum;
386
387 if (btrfs_test_opt(root, NODATASUM) ||
388 btrfs_test_flag(inode, NODATASUM))
389 return 0;
390
391 mutex_lock(&root->fs_info->fs_mutex);
392 path = btrfs_alloc_path();
393 item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
394 if (IS_ERR(item)) {
395 ret = PTR_ERR(item);
396 /* a csum that isn't present is a preallocated region. */
397 if (ret == -ENOENT || ret == -EFBIG)
398 ret = 0;
399 csum = 0;
400 printk("no csum found for inode %lu start %Lu\n", inode->i_ino, start);
401 goto out;
402 }
403 read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
404 BTRFS_CRC32_SIZE);
405 set_state_private(io_tree, start, csum);
406 out:
407 if (path)
408 btrfs_free_path(path);
409 mutex_unlock(&root->fs_info->fs_mutex);
410 return ret;
411 }
412
413 struct io_failure_record {
414 struct page *page;
415 u64 start;
416 u64 len;
417 u64 logical;
418 int last_mirror;
419 };
420
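/*
 * called when a data read fails: record (or look up) an
 * io_failure_record for the range and resubmit the read against the
 * next mirror, giving up with -EIO once every copy has been tried.
 */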
421 int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
422 struct page *page, u64 start, u64 end,
423 struct extent_state *state)
424 {
425 struct io_failure_record *failrec = NULL;
426 u64 private;
427 struct extent_map *em;
428 struct inode *inode = page->mapping->host;
429 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
430 struct bio *bio;
431 int num_copies;
432 int ret;
433 u64 logical;
434
435 ret = get_state_private(failure_tree, start, &private);
436 if (ret) {
437 size_t pg_offset = start - page_offset(page);
438 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
439 if (!failrec)
440 return -ENOMEM;
441 failrec->start = start;
442 failrec->len = end - start + 1;
443 failrec->last_mirror = 0;
444
445 em = btrfs_get_extent(inode, NULL, pg_offset, start,
446 failrec->len, 0);
447
448 if (!em || IS_ERR(em)) {
449 kfree(failrec);
450 return -EIO;
451 }
452 logical = start - em->start;
453 logical = em->block_start + logical;
454 failrec->logical = logical;
455 free_extent_map(em);
456 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
457 EXTENT_DIRTY, GFP_NOFS);
458 set_state_private(failure_tree, start,
459 (u64)(unsigned long)failrec);
460 } else {
461 failrec = (struct io_failure_record *)(unsigned long)private;
462 }
463 num_copies = btrfs_num_copies(
464 &BTRFS_I(inode)->root->fs_info->mapping_tree,
465 failrec->logical, failrec->len);
466 failrec->last_mirror++;
467 if (!state) {
468 spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
469 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
470 failrec->start,
471 EXTENT_LOCKED);
472 if (state && state->start != failrec->start)
473 state = NULL;
474 spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
475 }
476 if (!state || failrec->last_mirror > num_copies) {
477 set_state_private(failure_tree, failrec->start, 0);
478 clear_extent_bits(failure_tree, failrec->start,
479 failrec->start + failrec->len - 1,
480 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
481 kfree(failrec);
482 return -EIO;
483 }
484 bio = bio_alloc(GFP_NOFS, 1);
485 bio->bi_private = state;
486 bio->bi_end_io = failed_bio->bi_end_io;
487 bio->bi_sector = failrec->logical >> 9;
488 bio->bi_bdev = failed_bio->bi_bdev;
489 bio_add_page(bio, page, failrec->len, start - page_offset(page));
490 btrfs_submit_bio_hook(inode, READ, bio, failrec->last_mirror);
491 return 0;
492 }
493
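/*
 * verify the checksum of freshly read data against the value stashed by
 * the io hook; on success clear any pending failure record, on mismatch
 * poison the page contents and return -EIO so the failed hook can retry
 * another mirror.
 */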
494 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
495 struct extent_state *state)
496 {
497 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
498 struct inode *inode = page->mapping->host;
499 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
500 char *kaddr;
501 u64 private = ~(u32)0;
502 int ret;
503 struct btrfs_root *root = BTRFS_I(inode)->root;
504 u32 csum = ~(u32)0;
505 unsigned long flags;
506
507 if (btrfs_test_opt(root, NODATASUM) ||
508 btrfs_test_flag(inode, NODATASUM))
509 return 0;
510 if (state && state->start == start) {
511 private = state->private;
512 ret = 0;
513 } else {
514 ret = get_state_private(io_tree, start, &private);
515 }
516 local_irq_save(flags);
517 kaddr = kmap_atomic(page, KM_IRQ0);
518 if (ret) {
519 goto zeroit;
520 }
521 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
522 btrfs_csum_final(csum, (char *)&csum);
523 if (csum != private) {
524 goto zeroit;
525 }
526 kunmap_atomic(kaddr, KM_IRQ0);
527 local_irq_restore(flags);
528
529 /* if the io failure tree for this inode is non-empty,
530 * check to see if we've recovered from a failed IO
531 */
532 private = 0;
533 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
534 (u64)-1, 1, EXTENT_DIRTY)) {
535 u64 private_failure;
536 struct io_failure_record *failure;
537 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
538 start, &private_failure);
539 if (ret == 0) {
540 failure = (struct io_failure_record *)(unsigned long)
541 private_failure;
542 set_state_private(&BTRFS_I(inode)->io_failure_tree,
543 failure->start, 0);
544 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
545 failure->start,
546 failure->start + failure->len - 1,
547 EXTENT_DIRTY | EXTENT_LOCKED,
548 GFP_NOFS);
549 kfree(failure);
550 }
551 }
552 return 0;
553
554 zeroit:
555 printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
556 page->mapping->host->i_ino, (unsigned long long)start, csum,
557 private);
558 memset(kaddr + offset, 1, end - start + 1);
559 flush_dcache_page(page);
560 kunmap_atomic(kaddr, KM_IRQ0);
561 local_irq_restore(flags);
562 return -EIO;
563 }
564
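/*
 * read an inode item out of the btree and fill in the VFS inode,
 * including timestamps, the block group hint and the per-type inode and
 * address_space operations.
 */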
565 void btrfs_read_locked_inode(struct inode *inode)
566 {
567 struct btrfs_path *path;
568 struct extent_buffer *leaf;
569 struct btrfs_inode_item *inode_item;
570 struct btrfs_timespec *tspec;
571 struct btrfs_root *root = BTRFS_I(inode)->root;
572 struct btrfs_key location;
573 u64 alloc_group_block;
574 u32 rdev;
575 int ret;
576
577 path = btrfs_alloc_path();
578 BUG_ON(!path);
579 mutex_lock(&root->fs_info->fs_mutex);
580 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
581
582 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
583 if (ret)
584 goto make_bad;
585
586 leaf = path->nodes[0];
587 inode_item = btrfs_item_ptr(leaf, path->slots[0],
588 struct btrfs_inode_item);
589
590 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
591 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
592 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
593 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
594 inode->i_size = btrfs_inode_size(leaf, inode_item);
595
596 tspec = btrfs_inode_atime(inode_item);
597 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
598 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
599
600 tspec = btrfs_inode_mtime(inode_item);
601 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
602 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
603
604 tspec = btrfs_inode_ctime(inode_item);
605 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
606 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
607
608 inode->i_blocks = btrfs_inode_nblocks(leaf, inode_item);
609 inode->i_generation = btrfs_inode_generation(leaf, inode_item);
610 inode->i_rdev = 0;
611 rdev = btrfs_inode_rdev(leaf, inode_item);
612
613 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
614 BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
615 alloc_group_block);
616 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
617 if (!BTRFS_I(inode)->block_group) {
618 BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
619 NULL, 0,
620 BTRFS_BLOCK_GROUP_METADATA, 0);
621 }
622 btrfs_free_path(path);
623 inode_item = NULL;
624
625 mutex_unlock(&root->fs_info->fs_mutex);
626
627 switch (inode->i_mode & S_IFMT) {
628 case S_IFREG:
629 inode->i_mapping->a_ops = &btrfs_aops;
630 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
631 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
632 inode->i_fop = &btrfs_file_operations;
633 inode->i_op = &btrfs_file_inode_operations;
634 break;
635 case S_IFDIR:
636 inode->i_fop = &btrfs_dir_file_operations;
637 if (root == root->fs_info->tree_root)
638 inode->i_op = &btrfs_dir_ro_inode_operations;
639 else
640 inode->i_op = &btrfs_dir_inode_operations;
641 break;
642 case S_IFLNK:
643 inode->i_op = &btrfs_symlink_inode_operations;
644 inode->i_mapping->a_ops = &btrfs_symlink_aops;
645 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
646 break;
647 default:
648 init_special_inode(inode, inode->i_mode, rdev);
649 break;
650 }
651 return;
652
653 make_bad:
654 btrfs_release_path(root, path);
655 btrfs_free_path(path);
656 mutex_unlock(&root->fs_info->fs_mutex);
657 make_bad_inode(inode);
658 }
659
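/*
 * copy the in-memory inode fields into an inode item in the given leaf.
 */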
660 static void fill_inode_item(struct extent_buffer *leaf,
661 struct btrfs_inode_item *item,
662 struct inode *inode)
663 {
664 btrfs_set_inode_uid(leaf, item, inode->i_uid);
665 btrfs_set_inode_gid(leaf, item, inode->i_gid);
666 btrfs_set_inode_size(leaf, item, inode->i_size);
667 btrfs_set_inode_mode(leaf, item, inode->i_mode);
668 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
669
670 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
671 inode->i_atime.tv_sec);
672 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
673 inode->i_atime.tv_nsec);
674
675 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
676 inode->i_mtime.tv_sec);
677 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
678 inode->i_mtime.tv_nsec);
679
680 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
681 inode->i_ctime.tv_sec);
682 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
683 inode->i_ctime.tv_nsec);
684
685 btrfs_set_inode_nblocks(leaf, item, inode->i_blocks);
686 btrfs_set_inode_generation(leaf, item, inode->i_generation);
687 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
688 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
689 btrfs_set_inode_block_group(leaf, item,
690 BTRFS_I(inode)->block_group->key.objectid);
691 }
692
693 int btrfs_update_inode(struct btrfs_trans_handle *trans,
694 struct btrfs_root *root,
695 struct inode *inode)
696 {
697 struct btrfs_inode_item *inode_item;
698 struct btrfs_path *path;
699 struct extent_buffer *leaf;
700 int ret;
701
702 path = btrfs_alloc_path();
703 BUG_ON(!path);
704 ret = btrfs_lookup_inode(trans, root, path,
705 &BTRFS_I(inode)->location, 1);
706 if (ret) {
707 if (ret > 0)
708 ret = -ENOENT;
709 goto failed;
710 }
711
712 leaf = path->nodes[0];
713 inode_item = btrfs_item_ptr(leaf, path->slots[0],
714 struct btrfs_inode_item);
715
716 fill_inode_item(leaf, inode_item, inode);
717 btrfs_mark_buffer_dirty(leaf);
718 btrfs_set_inode_last_trans(trans, inode);
719 ret = 0;
720 failed:
721 btrfs_release_path(root, path);
722 btrfs_free_path(path);
723 return ret;
724 }
725
726
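/*
 * remove the directory entry, directory index item and inode back
 * reference for dentry inside the given transaction, then update the
 * directory size/timestamps and the victim's link count.
 */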
727 static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
728 struct btrfs_root *root,
729 struct inode *dir,
730 struct dentry *dentry)
731 {
732 struct btrfs_path *path;
733 const char *name = dentry->d_name.name;
734 int name_len = dentry->d_name.len;
735 int ret = 0;
736 struct extent_buffer *leaf;
737 struct btrfs_dir_item *di;
738 struct btrfs_key key;
739
740 path = btrfs_alloc_path();
741 if (!path) {
742 ret = -ENOMEM;
743 goto err;
744 }
745
746 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
747 name, name_len, -1);
748 if (IS_ERR(di)) {
749 ret = PTR_ERR(di);
750 goto err;
751 }
752 if (!di) {
753 ret = -ENOENT;
754 goto err;
755 }
756 leaf = path->nodes[0];
757 btrfs_dir_item_key_to_cpu(leaf, di, &key);
758 ret = btrfs_delete_one_dir_name(trans, root, path, di);
759 if (ret)
760 goto err;
761 btrfs_release_path(root, path);
762
763 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
764 key.objectid, name, name_len, -1);
765 if (IS_ERR(di)) {
766 ret = PTR_ERR(di);
767 goto err;
768 }
769 if (!di) {
770 ret = -ENOENT;
771 goto err;
772 }
773 ret = btrfs_delete_one_dir_name(trans, root, path, di);
774
775 dentry->d_inode->i_ctime = dir->i_ctime;
776 ret = btrfs_del_inode_ref(trans, root, name, name_len,
777 dentry->d_inode->i_ino,
778 dentry->d_parent->d_inode->i_ino);
779 if (ret) {
780 printk("failed to delete reference to %.*s, "
781 "inode %lu parent %lu\n", name_len, name,
782 dentry->d_inode->i_ino,
783 dentry->d_parent->d_inode->i_ino);
784 }
785 err:
786 btrfs_free_path(path);
787 if (!ret) {
788 dir->i_size -= name_len * 2;
789 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
790 btrfs_update_inode(trans, root, dir);
791 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
792 dentry->d_inode->i_nlink--;
793 #else
794 drop_nlink(dentry->d_inode);
795 #endif
796 ret = btrfs_update_inode(trans, root, dentry->d_inode);
797 dir->i_sb->s_dirt = 1;
798 }
799 return ret;
800 }
801
802 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
803 {
804 struct btrfs_root *root;
805 struct btrfs_trans_handle *trans;
806 struct inode *inode = dentry->d_inode;
807 int ret;
808 unsigned long nr = 0;
809
810 root = BTRFS_I(dir)->root;
811 mutex_lock(&root->fs_info->fs_mutex);
812
813 ret = btrfs_check_free_space(root, 1, 1);
814 if (ret)
815 goto fail;
816
817 trans = btrfs_start_transaction(root, 1);
818
819 btrfs_set_trans_block_group(trans, dir);
820 ret = btrfs_unlink_trans(trans, root, dir, dentry);
821 nr = trans->blocks_used;
822
823 if (inode->i_nlink == 0) {
824 int found;
825 /* if the inode isn't linked anywhere,
826 * we don't need to worry about
827 * data=ordered
828 */
829 found = btrfs_del_ordered_inode(inode);
830 if (found == 1) {
831 atomic_dec(&inode->i_count);
832 }
833 }
834
835 btrfs_end_transaction(trans, root);
836 fail:
837 mutex_unlock(&root->fs_info->fs_mutex);
838 btrfs_btree_balance_dirty(root, nr);
839 btrfs_throttle(root);
840 return ret;
841 }
842
843 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
844 {
845 struct inode *inode = dentry->d_inode;
846 int err = 0;
847 int ret;
848 struct btrfs_root *root = BTRFS_I(dir)->root;
849 struct btrfs_trans_handle *trans;
850 unsigned long nr = 0;
851
852 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
853 return -ENOTEMPTY;
854
855 mutex_lock(&root->fs_info->fs_mutex);
856 ret = btrfs_check_free_space(root, 1, 1);
857 if (ret)
858 goto fail;
859
860 trans = btrfs_start_transaction(root, 1);
861 btrfs_set_trans_block_group(trans, dir);
862
863 /* now the directory is empty */
864 err = btrfs_unlink_trans(trans, root, dir, dentry);
865 if (!err) {
866 inode->i_size = 0;
867 }
868
869 nr = trans->blocks_used;
870 ret = btrfs_end_transaction(trans, root);
871 fail:
872 mutex_unlock(&root->fs_info->fs_mutex);
873 btrfs_btree_balance_dirty(root, nr);
874 btrfs_throttle(root);
875
876 if (ret && !err)
877 err = ret;
878 return err;
879 }
880
881 /*
882 * this can truncate away extent items, csum items and directory items.
883 * It starts at a high offset and removes keys until it can't find
884 * any higher than i_size.
885 *
886 * csum items that cross the new i_size are truncated to the new size
887 * as well.
888 */
889 static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
890 struct btrfs_root *root,
891 struct inode *inode,
892 u32 min_type)
893 {
894 int ret;
895 struct btrfs_path *path;
896 struct btrfs_key key;
897 struct btrfs_key found_key;
898 u32 found_type;
899 struct extent_buffer *leaf;
900 struct btrfs_file_extent_item *fi;
901 u64 extent_start = 0;
902 u64 extent_num_bytes = 0;
903 u64 item_end = 0;
904 u64 root_gen = 0;
905 u64 root_owner = 0;
906 int found_extent;
907 int del_item;
908 int pending_del_nr = 0;
909 int pending_del_slot = 0;
910 int extent_type = -1;
911
912 btrfs_drop_extent_cache(inode, inode->i_size, (u64)-1);
913 path = btrfs_alloc_path();
914 path->reada = -1;
915 BUG_ON(!path);
916
917 /* FIXME, add redo link to tree so we don't leak on crash */
918 key.objectid = inode->i_ino;
919 key.offset = (u64)-1;
920 key.type = (u8)-1;
921
922 btrfs_init_path(path);
923 search_again:
924 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
925 if (ret < 0) {
926 goto error;
927 }
928 if (ret > 0) {
929 BUG_ON(path->slots[0] == 0);
930 path->slots[0]--;
931 }
932
933 while(1) {
934 fi = NULL;
935 leaf = path->nodes[0];
936 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
937 found_type = btrfs_key_type(&found_key);
938
939 if (found_key.objectid != inode->i_ino)
940 break;
941
942 if (found_type < min_type)
943 break;
944
945 item_end = found_key.offset;
946 if (found_type == BTRFS_EXTENT_DATA_KEY) {
947 fi = btrfs_item_ptr(leaf, path->slots[0],
948 struct btrfs_file_extent_item);
949 extent_type = btrfs_file_extent_type(leaf, fi);
950 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
951 item_end +=
952 btrfs_file_extent_num_bytes(leaf, fi);
953 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
954 struct btrfs_item *item = btrfs_item_nr(leaf,
955 path->slots[0]);
956 item_end += btrfs_file_extent_inline_len(leaf,
957 item);
958 }
959 item_end--;
960 }
961 if (found_type == BTRFS_CSUM_ITEM_KEY) {
962 ret = btrfs_csum_truncate(trans, root, path,
963 inode->i_size);
964 BUG_ON(ret);
965 }
966 if (item_end < inode->i_size) {
967 if (found_type == BTRFS_DIR_ITEM_KEY) {
968 found_type = BTRFS_INODE_ITEM_KEY;
969 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
970 found_type = BTRFS_CSUM_ITEM_KEY;
971 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
972 found_type = BTRFS_XATTR_ITEM_KEY;
973 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
974 found_type = BTRFS_INODE_REF_KEY;
975 } else if (found_type) {
976 found_type--;
977 } else {
978 break;
979 }
980 btrfs_set_key_type(&key, found_type);
981 goto next;
982 }
983 if (found_key.offset >= inode->i_size)
984 del_item = 1;
985 else
986 del_item = 0;
987 found_extent = 0;
988
989 /* FIXME, shrink the extent if the ref count is only 1 */
990 if (found_type != BTRFS_EXTENT_DATA_KEY)
991 goto delete;
992
993 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
994 u64 num_dec;
995 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
996 if (!del_item) {
997 u64 orig_num_bytes =
998 btrfs_file_extent_num_bytes(leaf, fi);
999 extent_num_bytes = inode->i_size -
1000 found_key.offset + root->sectorsize - 1;
1001 extent_num_bytes = extent_num_bytes &
1002 ~((u64)root->sectorsize - 1);
1003 btrfs_set_file_extent_num_bytes(leaf, fi,
1004 extent_num_bytes);
1005 num_dec = (orig_num_bytes -
1006 extent_num_bytes);
1007 if (extent_start != 0)
1008 dec_i_blocks(inode, num_dec);
1009 btrfs_mark_buffer_dirty(leaf);
1010 } else {
1011 extent_num_bytes =
1012 btrfs_file_extent_disk_num_bytes(leaf,
1013 fi);
1014 /* FIXME blocksize != 4096 */
1015 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
1016 if (extent_start != 0) {
1017 found_extent = 1;
1018 dec_i_blocks(inode, num_dec);
1019 }
1020 root_gen = btrfs_header_generation(leaf);
1021 root_owner = btrfs_header_owner(leaf);
1022 }
1023 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1024 if (!del_item) {
1025 u32 newsize = inode->i_size - found_key.offset;
1026 dec_i_blocks(inode, item_end + 1 -
1027 found_key.offset - newsize);
1028 newsize =
1029 btrfs_file_extent_calc_inline_size(newsize);
1030 ret = btrfs_truncate_item(trans, root, path,
1031 newsize, 1);
1032 BUG_ON(ret);
1033 } else {
1034 dec_i_blocks(inode, item_end + 1 -
1035 found_key.offset);
1036 }
1037 }
1038 delete:
1039 if (del_item) {
1040 if (!pending_del_nr) {
1041 /* no pending yet, add ourselves */
1042 pending_del_slot = path->slots[0];
1043 pending_del_nr = 1;
1044 } else if (pending_del_nr &&
1045 path->slots[0] + 1 == pending_del_slot) {
1046 /* hop on the pending chunk */
1047 pending_del_nr++;
1048 pending_del_slot = path->slots[0];
1049 } else {
1050 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
1051 }
1052 } else {
1053 break;
1054 }
1055 if (found_extent) {
1056 ret = btrfs_free_extent(trans, root, extent_start,
1057 extent_num_bytes,
1058 root_owner,
1059 root_gen, inode->i_ino,
1060 found_key.offset, 0);
1061 BUG_ON(ret);
1062 }
1063 next:
1064 if (path->slots[0] == 0) {
1065 if (pending_del_nr)
1066 goto del_pending;
1067 btrfs_release_path(root, path);
1068 goto search_again;
1069 }
1070
1071 path->slots[0]--;
1072 if (pending_del_nr &&
1073 path->slots[0] + 1 != pending_del_slot) {
1074 struct btrfs_key debug;
1075 del_pending:
1076 btrfs_item_key_to_cpu(path->nodes[0], &debug,
1077 pending_del_slot);
1078 ret = btrfs_del_items(trans, root, path,
1079 pending_del_slot,
1080 pending_del_nr);
1081 BUG_ON(ret);
1082 pending_del_nr = 0;
1083 btrfs_release_path(root, path);
1084 goto search_again;
1085 }
1086 }
1087 ret = 0;
1088 error:
1089 if (pending_del_nr) {
1090 ret = btrfs_del_items(trans, root, path, pending_del_slot,
1091 pending_del_nr);
1092 }
1093 btrfs_release_path(root, path);
1094 btrfs_free_path(path);
1095 inode->i_sb->s_dirt = 1;
1096 return ret;
1097 }
1098
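/*
 * mark a single page delalloc so writeback will COW it, zeroing
 * everything from zero_start to the end of the page.
 */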
1099 static int btrfs_cow_one_page(struct inode *inode, struct page *page,
1100 size_t zero_start)
1101 {
1102 char *kaddr;
1103 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1104 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1105 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1106 int ret = 0;
1107
1108 WARN_ON(!PageLocked(page));
1109 set_page_extent_mapped(page);
1110
1111 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
1112 set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
1113 page_end, GFP_NOFS);
1114
1115 if (zero_start != PAGE_CACHE_SIZE) {
1116 kaddr = kmap(page);
1117 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
1118 flush_dcache_page(page);
1119 kunmap(page);
1120 }
1121 set_page_dirty(page);
1122 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
1123
1124 return ret;
1125 }
1126
1127 /*
1128 * taken from block_truncate_page, but does cow as it zeros out
1129 * any bytes left in the last page in the file.
1130 */
1131 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
1132 {
1133 struct inode *inode = mapping->host;
1134 struct btrfs_root *root = BTRFS_I(inode)->root;
1135 u32 blocksize = root->sectorsize;
1136 pgoff_t index = from >> PAGE_CACHE_SHIFT;
1137 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1138 struct page *page;
1139 int ret = 0;
1140 u64 page_start;
1141
1142 if ((offset & (blocksize - 1)) == 0)
1143 goto out;
1144
1145 ret = -ENOMEM;
1146 page = grab_cache_page(mapping, index);
1147 if (!page)
1148 goto out;
1149 if (!PageUptodate(page)) {
1150 ret = btrfs_readpage(NULL, page);
1151 lock_page(page);
1152 if (!PageUptodate(page)) {
1153 ret = -EIO;
1154 goto out;
1155 }
1156 }
1157 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1158
1159 ret = btrfs_cow_one_page(inode, page, offset);
1160
1161 unlock_page(page);
1162 page_cache_release(page);
1163 out:
1164 return ret;
1165 }
1166
1167 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
1168 {
1169 struct inode *inode = dentry->d_inode;
1170 int err;
1171
1172 err = inode_change_ok(inode, attr);
1173 if (err)
1174 return err;
1175
1176 if (S_ISREG(inode->i_mode) &&
1177 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
1178 struct btrfs_trans_handle *trans;
1179 struct btrfs_root *root = BTRFS_I(inode)->root;
1180 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1181
1182 u64 mask = root->sectorsize - 1;
1183 u64 hole_start = (inode->i_size + mask) & ~mask;
1184 u64 block_end = (attr->ia_size + mask) & ~mask;
1185 u64 hole_size;
1186 u64 alloc_hint = 0;
1187
1188 if (attr->ia_size <= hole_start)
1189 goto out;
1190
1191 mutex_lock(&root->fs_info->fs_mutex);
1192 err = btrfs_check_free_space(root, 1, 0);
1193 mutex_unlock(&root->fs_info->fs_mutex);
1194 if (err)
1195 goto fail;
1196
1197 btrfs_truncate_page(inode->i_mapping, inode->i_size);
1198
1199 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1200 hole_size = block_end - hole_start;
1201
1202 mutex_lock(&root->fs_info->fs_mutex);
1203 trans = btrfs_start_transaction(root, 1);
1204 btrfs_set_trans_block_group(trans, inode);
1205 err = btrfs_drop_extents(trans, root, inode,
1206 hole_start, block_end, hole_start,
1207 &alloc_hint);
1208
1209 if (alloc_hint != EXTENT_MAP_INLINE) {
1210 err = btrfs_insert_file_extent(trans, root,
1211 inode->i_ino,
1212 hole_start, 0, 0,
1213 hole_size);
1214 btrfs_drop_extent_cache(inode, hole_start,
1215 hole_size - 1);
1216 btrfs_check_file(root, inode);
1217 }
1218 btrfs_end_transaction(trans, root);
1219 mutex_unlock(&root->fs_info->fs_mutex);
1220 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1221 if (err)
1222 return err;
1223 }
1224 out:
1225 err = inode_setattr(inode, attr);
1226 fail:
1227 return err;
1228 }
1229
1230 void btrfs_put_inode(struct inode *inode)
1231 {
1232 int ret;
1233
1234 if (!BTRFS_I(inode)->ordered_trans) {
1235 return;
1236 }
1237
1238 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) ||
1239 mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
1240 return;
1241
1242 ret = btrfs_del_ordered_inode(inode);
1243 if (ret == 1) {
1244 atomic_dec(&inode->i_count);
1245 }
1246 }
1247
1248 void btrfs_delete_inode(struct inode *inode)
1249 {
1250 struct btrfs_trans_handle *trans;
1251 struct btrfs_root *root = BTRFS_I(inode)->root;
1252 unsigned long nr;
1253 int ret;
1254
1255 truncate_inode_pages(&inode->i_data, 0);
1256 if (is_bad_inode(inode)) {
1257 goto no_delete;
1258 }
1259
1260 inode->i_size = 0;
1261 mutex_lock(&root->fs_info->fs_mutex);
1262 trans = btrfs_start_transaction(root, 1);
1263
1264 btrfs_set_trans_block_group(trans, inode);
1265 ret = btrfs_truncate_in_trans(trans, root, inode, 0);
1266 if (ret)
1267 goto no_delete_lock;
1268
1269 nr = trans->blocks_used;
1270 clear_inode(inode);
1271
1272 btrfs_end_transaction(trans, root);
1273 mutex_unlock(&root->fs_info->fs_mutex);
1274 btrfs_btree_balance_dirty(root, nr);
1275 btrfs_throttle(root);
1276 return;
1277
1278 no_delete_lock:
1279 nr = trans->blocks_used;
1280 btrfs_end_transaction(trans, root);
1281 mutex_unlock(&root->fs_info->fs_mutex);
1282 btrfs_btree_balance_dirty(root, nr);
1283 btrfs_throttle(root);
1284 no_delete:
1285 clear_inode(inode);
1286 }
1287
1288 /*
1289 * this returns the key found in the dir entry in the location pointer.
1290 * If no dir entries were found, location->objectid is 0.
1291 */
1292 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
1293 struct btrfs_key *location)
1294 {
1295 const char *name = dentry->d_name.name;
1296 int namelen = dentry->d_name.len;
1297 struct btrfs_dir_item *di;
1298 struct btrfs_path *path;
1299 struct btrfs_root *root = BTRFS_I(dir)->root;
1300 int ret = 0;
1301
1302 if (namelen == 1 && strcmp(name, ".") == 0) {
1303 location->objectid = dir->i_ino;
1304 location->type = BTRFS_INODE_ITEM_KEY;
1305 location->offset = 0;
1306 return 0;
1307 }
1308 path = btrfs_alloc_path();
1309 BUG_ON(!path);
1310
1311 if (namelen == 2 && strcmp(name, "..") == 0) {
1312 struct btrfs_key key;
1313 struct extent_buffer *leaf;
1314 u32 nritems;
1315 int slot;
1316
1317 key.objectid = dir->i_ino;
1318 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1319 key.offset = 0;
1320 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1321 BUG_ON(ret == 0);
1322 ret = 0;
1323
1324 leaf = path->nodes[0];
1325 slot = path->slots[0];
1326 nritems = btrfs_header_nritems(leaf);
1327 if (slot >= nritems)
1328 goto out_err;
1329
1330 btrfs_item_key_to_cpu(leaf, &key, slot);
1331 if (key.objectid != dir->i_ino ||
1332 key.type != BTRFS_INODE_REF_KEY) {
1333 goto out_err;
1334 }
1335 location->objectid = key.offset;
1336 location->type = BTRFS_INODE_ITEM_KEY;
1337 location->offset = 0;
1338 goto out;
1339 }
1340
1341 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
1342 namelen, 0);
1343 if (IS_ERR(di))
1344 ret = PTR_ERR(di);
1345 if (!di || IS_ERR(di)) {
1346 goto out_err;
1347 }
1348 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
1349 out:
1350 btrfs_free_path(path);
1351 return ret;
1352 out_err:
1353 location->objectid = 0;
1354 goto out;
1355 }
1356
1357 /*
1358 * when we hit a tree root in a directory, the btrfs part of the inode
1359 * needs to be changed to reflect the root directory of the tree root. This
1360 * is kind of like crossing a mount point.
1361 */
1362 static int fixup_tree_root_location(struct btrfs_root *root,
1363 struct btrfs_key *location,
1364 struct btrfs_root **sub_root,
1365 struct dentry *dentry)
1366 {
1367 struct btrfs_path *path;
1368 struct btrfs_root_item *ri;
1369
1370 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
1371 return 0;
1372 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1373 return 0;
1374
1375 path = btrfs_alloc_path();
1376 BUG_ON(!path);
1377 mutex_lock(&root->fs_info->fs_mutex);
1378
1379 *sub_root = btrfs_read_fs_root(root->fs_info, location,
1380 dentry->d_name.name,
1381 dentry->d_name.len);
1382 if (IS_ERR(*sub_root))
1383 return PTR_ERR(*sub_root);
1384
1385 ri = &(*sub_root)->root_item;
1386 location->objectid = btrfs_root_dirid(ri);
1387 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1388 location->offset = 0;
1389
1390 btrfs_free_path(path);
1391 mutex_unlock(&root->fs_info->fs_mutex);
1392 return 0;
1393 }
1394
1395 static int btrfs_init_locked_inode(struct inode *inode, void *p)
1396 {
1397 struct btrfs_iget_args *args = p;
1398 inode->i_ino = args->ino;
1399 BTRFS_I(inode)->root = args->root;
1400 BTRFS_I(inode)->delalloc_bytes = 0;
1401 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1402 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1403 inode->i_mapping, GFP_NOFS);
1404 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1405 inode->i_mapping, GFP_NOFS);
1406 return 0;
1407 }
1408
1409 static int btrfs_find_actor(struct inode *inode, void *opaque)
1410 {
1411 struct btrfs_iget_args *args = opaque;
1412 return (args->ino == inode->i_ino &&
1413 args->root == BTRFS_I(inode)->root);
1414 }
1415
1416 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
1417 u64 root_objectid)
1418 {
1419 struct btrfs_iget_args args;
1420 args.ino = objectid;
1421 args.root = btrfs_lookup_fs_root(btrfs_sb(s)->fs_info, root_objectid);
1422
1423 if (!args.root)
1424 return NULL;
1425
1426 return ilookup5(s, objectid, btrfs_find_actor, (void *)&args);
1427 }
1428
1429 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
1430 struct btrfs_root *root)
1431 {
1432 struct inode *inode;
1433 struct btrfs_iget_args args;
1434 args.ino = objectid;
1435 args.root = root;
1436
1437 inode = iget5_locked(s, objectid, btrfs_find_actor,
1438 btrfs_init_locked_inode,
1439 (void *)&args);
1440 return inode;
1441 }
1442
1443 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
1444 struct nameidata *nd)
1445 {
1446 struct inode * inode;
1447 struct btrfs_inode *bi = BTRFS_I(dir);
1448 struct btrfs_root *root = bi->root;
1449 struct btrfs_root *sub_root = root;
1450 struct btrfs_key location;
1451 int ret;
1452
1453 if (dentry->d_name.len > BTRFS_NAME_LEN)
1454 return ERR_PTR(-ENAMETOOLONG);
1455
1456 mutex_lock(&root->fs_info->fs_mutex);
1457 ret = btrfs_inode_by_name(dir, dentry, &location);
1458 mutex_unlock(&root->fs_info->fs_mutex);
1459
1460 if (ret < 0)
1461 return ERR_PTR(ret);
1462
1463 inode = NULL;
1464 if (location.objectid) {
1465 ret = fixup_tree_root_location(root, &location, &sub_root,
1466 dentry);
1467 if (ret < 0)
1468 return ERR_PTR(ret);
1469 if (ret > 0)
1470 return ERR_PTR(-ENOENT);
1471 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
1472 sub_root);
1473 if (!inode)
1474 return ERR_PTR(-EACCES);
1475 if (inode->i_state & I_NEW) {
1476 /* the inode and parent dir are in two different roots */
1477 if (sub_root != root) {
1478 igrab(inode);
1479 sub_root->inode = inode;
1480 }
1481 BTRFS_I(inode)->root = sub_root;
1482 memcpy(&BTRFS_I(inode)->location, &location,
1483 sizeof(location));
1484 btrfs_read_locked_inode(inode);
1485 unlock_new_inode(inode);
1486 }
1487 }
1488 return d_splice_alias(inode, dentry);
1489 }
1490
1491 static unsigned char btrfs_filetype_table[] = {
1492 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
1493 };
1494
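/*
 * readdir walks the DIR_INDEX items for the directory (DIR_ITEM keys
 * for the tree root), emitting "." and ".." from the inode number and
 * the inode back reference respectively.
 */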
1495 static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1496 {
1497 struct inode *inode = filp->f_dentry->d_inode;
1498 struct btrfs_root *root = BTRFS_I(inode)->root;
1499 struct btrfs_item *item;
1500 struct btrfs_dir_item *di;
1501 struct btrfs_key key;
1502 struct btrfs_key found_key;
1503 struct btrfs_path *path;
1504 int ret;
1505 u32 nritems;
1506 struct extent_buffer *leaf;
1507 int slot;
1508 int advance;
1509 unsigned char d_type;
1510 int over = 0;
1511 u32 di_cur;
1512 u32 di_total;
1513 u32 di_len;
1514 int key_type = BTRFS_DIR_INDEX_KEY;
1515 char tmp_name[32];
1516 char *name_ptr;
1517 int name_len;
1518
1519 /* FIXME, use a real flag for deciding about the key type */
1520 if (root->fs_info->tree_root == root)
1521 key_type = BTRFS_DIR_ITEM_KEY;
1522
1523 /* special case for "." */
1524 if (filp->f_pos == 0) {
1525 over = filldir(dirent, ".", 1,
1526 1, inode->i_ino,
1527 DT_DIR);
1528 if (over)
1529 return 0;
1530 filp->f_pos = 1;
1531 }
1532
1533 mutex_lock(&root->fs_info->fs_mutex);
1534 key.objectid = inode->i_ino;
1535 path = btrfs_alloc_path();
1536 path->reada = 2;
1537
1538 /* special case for .., just use the back ref */
1539 if (filp->f_pos == 1) {
1540 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1541 key.offset = 0;
1542 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1543 BUG_ON(ret == 0);
1544 leaf = path->nodes[0];
1545 slot = path->slots[0];
1546 nritems = btrfs_header_nritems(leaf);
1547 if (slot >= nritems) {
1548 btrfs_release_path(root, path);
1549 goto read_dir_items;
1550 }
1551 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1552 btrfs_release_path(root, path);
1553 if (found_key.objectid != key.objectid ||
1554 found_key.type != BTRFS_INODE_REF_KEY)
1555 goto read_dir_items;
1556 over = filldir(dirent, "..", 2,
1557 2, found_key.offset, DT_DIR);
1558 if (over)
1559 goto nopos;
1560 filp->f_pos = 2;
1561 }
1562
1563 read_dir_items:
1564 btrfs_set_key_type(&key, key_type);
1565 key.offset = filp->f_pos;
1566
1567 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1568 if (ret < 0)
1569 goto err;
1570 advance = 0;
1571 while(1) {
1572 leaf = path->nodes[0];
1573 nritems = btrfs_header_nritems(leaf);
1574 slot = path->slots[0];
1575 if (advance || slot >= nritems) {
1576 if (slot >= nritems -1) {
1577 ret = btrfs_next_leaf(root, path);
1578 if (ret)
1579 break;
1580 leaf = path->nodes[0];
1581 nritems = btrfs_header_nritems(leaf);
1582 slot = path->slots[0];
1583 } else {
1584 slot++;
1585 path->slots[0]++;
1586 }
1587 }
1588 advance = 1;
1589 item = btrfs_item_nr(leaf, slot);
1590 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1591
1592 if (found_key.objectid != key.objectid)
1593 break;
1594 if (btrfs_key_type(&found_key) != key_type)
1595 break;
1596 if (found_key.offset < filp->f_pos)
1597 continue;
1598
1599 filp->f_pos = found_key.offset;
1600 advance = 1;
1601 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
1602 di_cur = 0;
1603 di_total = btrfs_item_size(leaf, item);
1604 while(di_cur < di_total) {
1605 struct btrfs_key location;
1606
1607 name_len = btrfs_dir_name_len(leaf, di);
1608 if (name_len < 32) {
1609 name_ptr = tmp_name;
1610 } else {
1611 name_ptr = kmalloc(name_len, GFP_NOFS);
1612 BUG_ON(!name_ptr);
1613 }
1614 read_extent_buffer(leaf, name_ptr,
1615 (unsigned long)(di + 1), name_len);
1616
1617 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
1618 btrfs_dir_item_key_to_cpu(leaf, di, &location);
1619 over = filldir(dirent, name_ptr, name_len,
1620 found_key.offset,
1621 location.objectid,
1622 d_type);
1623
1624 if (name_ptr != tmp_name)
1625 kfree(name_ptr);
1626
1627 if (over)
1628 goto nopos;
1629 di_len = btrfs_dir_name_len(leaf, di) +
1630 btrfs_dir_data_len(leaf, di) +sizeof(*di);
1631 di_cur += di_len;
1632 di = (struct btrfs_dir_item *)((char *)di + di_len);
1633 }
1634 }
1635 if (key_type == BTRFS_DIR_INDEX_KEY)
1636 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
1637 else
1638 filp->f_pos++;
1639 nopos:
1640 ret = 0;
1641 err:
1642 btrfs_release_path(root, path);
1643 btrfs_free_path(path);
1644 mutex_unlock(&root->fs_info->fs_mutex);
1645 return ret;
1646 }
1647
1648 int btrfs_write_inode(struct inode *inode, int wait)
1649 {
1650 struct btrfs_root *root = BTRFS_I(inode)->root;
1651 struct btrfs_trans_handle *trans;
1652 int ret = 0;
1653
1654 if (wait) {
1655 mutex_lock(&root->fs_info->fs_mutex);
1656 trans = btrfs_start_transaction(root, 1);
1657 btrfs_set_trans_block_group(trans, inode);
1658 ret = btrfs_commit_transaction(trans, root);
1659 mutex_unlock(&root->fs_info->fs_mutex);
1660 }
1661 return ret;
1662 }
1663
1664 /*
1665 * This is somewhat expensive, updating the tree every time the
1666 * inode changes, but the inode is most likely to be found in cache.
1667 * FIXME: needs more benchmarking; there is no reason other than performance
1668 * to keep or drop this code.
1669 */
1670 void btrfs_dirty_inode(struct inode *inode)
1671 {
1672 struct btrfs_root *root = BTRFS_I(inode)->root;
1673 struct btrfs_trans_handle *trans;
1674
1675 mutex_lock(&root->fs_info->fs_mutex);
1676 trans = btrfs_start_transaction(root, 1);
1677 btrfs_set_trans_block_group(trans, inode);
1678 btrfs_update_inode(trans, root, inode);
1679 btrfs_end_transaction(trans, root);
1680 mutex_unlock(&root->fs_info->fs_mutex);
1681 }
1682
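/*
 * allocate a new VFS inode and insert the matching inode item and inode
 * back reference into the tree, picking a block group to hint future
 * allocations.
 */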
1683 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
1684 struct btrfs_root *root,
1685 const char *name, int name_len,
1686 u64 ref_objectid,
1687 u64 objectid,
1688 struct btrfs_block_group_cache *group,
1689 int mode)
1690 {
1691 struct inode *inode;
1692 struct btrfs_inode_item *inode_item;
1693 struct btrfs_block_group_cache *new_inode_group;
1694 struct btrfs_key *location;
1695 struct btrfs_path *path;
1696 struct btrfs_inode_ref *ref;
1697 struct btrfs_key key[2];
1698 u32 sizes[2];
1699 unsigned long ptr;
1700 int ret;
1701 int owner;
1702
1703 path = btrfs_alloc_path();
1704 BUG_ON(!path);
1705
1706 inode = new_inode(root->fs_info->sb);
1707 if (!inode)
1708 return ERR_PTR(-ENOMEM);
1709
1710 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1711 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1712 inode->i_mapping, GFP_NOFS);
1713 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1714 inode->i_mapping, GFP_NOFS);
1715 BTRFS_I(inode)->delalloc_bytes = 0;
1716 BTRFS_I(inode)->root = root;
1717
1718 if (mode & S_IFDIR)
1719 owner = 0;
1720 else
1721 owner = 1;
1722 new_inode_group = btrfs_find_block_group(root, group, 0,
1723 BTRFS_BLOCK_GROUP_METADATA, owner);
1724 if (!new_inode_group) {
1725 printk("btrfs_find_block_group failed\n");
1726 new_inode_group = group;
1727 }
1728 BTRFS_I(inode)->block_group = new_inode_group;
1729 BTRFS_I(inode)->flags = 0;
1730
1731 key[0].objectid = objectid;
1732 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
1733 key[0].offset = 0;
1734
1735 key[1].objectid = objectid;
1736 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
1737 key[1].offset = ref_objectid;
1738
1739 sizes[0] = sizeof(struct btrfs_inode_item);
1740 sizes[1] = name_len + sizeof(*ref);
1741
1742 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
1743 if (ret != 0)
1744 goto fail;
1745
1746 if (objectid > root->highest_inode)
1747 root->highest_inode = objectid;
1748
1749 inode->i_uid = current->fsuid;
1750 inode->i_gid = current->fsgid;
1751 inode->i_mode = mode;
1752 inode->i_ino = objectid;
1753 inode->i_blocks = 0;
1754 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1755 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1756 struct btrfs_inode_item);
1757 fill_inode_item(path->nodes[0], inode_item, inode);
1758
1759 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
1760 struct btrfs_inode_ref);
1761 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
1762 ptr = (unsigned long)(ref + 1);
1763 write_extent_buffer(path->nodes[0], name, ptr, name_len);
1764
1765 btrfs_mark_buffer_dirty(path->nodes[0]);
1766 btrfs_free_path(path);
1767
1768 location = &BTRFS_I(inode)->location;
1769 location->objectid = objectid;
1770 location->offset = 0;
1771 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1772
1773 insert_inode_hash(inode);
1774 return inode;
1775 fail:
1776 btrfs_free_path(path);
1777 return ERR_PTR(ret);
1778 }
1779
1780 static inline u8 btrfs_inode_type(struct inode *inode)
1781 {
1782 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
1783 }
1784
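/*
 * add a directory entry (and optionally an inode back reference)
 * linking inode into the parent directory of dentry, and grow the
 * parent's i_size by twice the name length.
 */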
1785 static int btrfs_add_link(struct btrfs_trans_handle *trans,
1786 struct dentry *dentry, struct inode *inode,
1787 int add_backref)
1788 {
1789 int ret;
1790 struct btrfs_key key;
1791 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
1792 struct inode *parent_inode;
1793
1794 key.objectid = inode->i_ino;
1795 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1796 key.offset = 0;
1797
1798 ret = btrfs_insert_dir_item(trans, root,
1799 dentry->d_name.name, dentry->d_name.len,
1800 dentry->d_parent->d_inode->i_ino,
1801 &key, btrfs_inode_type(inode));
1802 if (ret == 0) {
1803 if (add_backref) {
1804 ret = btrfs_insert_inode_ref(trans, root,
1805 dentry->d_name.name,
1806 dentry->d_name.len,
1807 inode->i_ino,
1808 dentry->d_parent->d_inode->i_ino);
1809 }
1810 parent_inode = dentry->d_parent->d_inode;
1811 parent_inode->i_size += dentry->d_name.len * 2;
1812 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1813 ret = btrfs_update_inode(trans, root,
1814 dentry->d_parent->d_inode);
1815 }
1816 return ret;
1817 }
1818
1819 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
1820 struct dentry *dentry, struct inode *inode,
1821 int backref)
1822 {
1823 int err = btrfs_add_link(trans, dentry, inode, backref);
1824 if (!err) {
1825 d_instantiate(dentry, inode);
1826 return 0;
1827 }
1828 if (err > 0)
1829 err = -EEXIST;
1830 return err;
1831 }
1832
1833 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
1834 int mode, dev_t rdev)
1835 {
1836 struct btrfs_trans_handle *trans;
1837 struct btrfs_root *root = BTRFS_I(dir)->root;
1838 struct inode *inode = NULL;
1839 int err;
1840 int drop_inode = 0;
1841 u64 objectid;
1842 unsigned long nr = 0;
1843
1844 if (!new_valid_dev(rdev))
1845 return -EINVAL;
1846
1847 mutex_lock(&root->fs_info->fs_mutex);
1848 err = btrfs_check_free_space(root, 1, 0);
1849 if (err)
1850 goto fail;
1851
1852 trans = btrfs_start_transaction(root, 1);
1853 btrfs_set_trans_block_group(trans, dir);
1854
1855 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1856 if (err) {
1857 err = -ENOSPC;
1858 goto out_unlock;
1859 }
1860
1861 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1862 dentry->d_name.len,
1863 dentry->d_parent->d_inode->i_ino, objectid,
1864 BTRFS_I(dir)->block_group, mode);
1865 err = PTR_ERR(inode);
1866 if (IS_ERR(inode))
1867 goto out_unlock;
1868
1869 btrfs_set_trans_block_group(trans, inode);
1870 err = btrfs_add_nondir(trans, dentry, inode, 0);
1871 if (err)
1872 drop_inode = 1;
1873 else {
1874 inode->i_op = &btrfs_special_inode_operations;
1875 init_special_inode(inode, inode->i_mode, rdev);
1876 btrfs_update_inode(trans, root, inode);
1877 }
1878 dir->i_sb->s_dirt = 1;
1879 btrfs_update_inode_block_group(trans, inode);
1880 btrfs_update_inode_block_group(trans, dir);
1881 out_unlock:
1882 nr = trans->blocks_used;
1883 btrfs_end_transaction(trans, root);
1884 fail:
1885 mutex_unlock(&root->fs_info->fs_mutex);
1886
1887 if (drop_inode) {
1888 inode_dec_link_count(inode);
1889 iput(inode);
1890 }
1891 btrfs_btree_balance_dirty(root, nr);
1892 btrfs_throttle(root);
1893 return err;
1894 }
1895
1896 static int btrfs_create(struct inode *dir, struct dentry *dentry,
1897 int mode, struct nameidata *nd)
1898 {
1899 struct btrfs_trans_handle *trans;
1900 struct btrfs_root *root = BTRFS_I(dir)->root;
1901 struct inode *inode = NULL;
1902 int err;
1903 int drop_inode = 0;
1904 unsigned long nr = 0;
1905 u64 objectid;
1906
1907 mutex_lock(&root->fs_info->fs_mutex);
1908 err = btrfs_check_free_space(root, 1, 0);
1909 if (err)
1910 goto fail;
1911 trans = btrfs_start_transaction(root, 1);
1912 btrfs_set_trans_block_group(trans, dir);
1913
1914 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1915 if (err) {
1916 err = -ENOSPC;
1917 goto out_unlock;
1918 }
1919
1920 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1921 dentry->d_name.len,
1922 dentry->d_parent->d_inode->i_ino,
1923 objectid, BTRFS_I(dir)->block_group, mode);
1924 err = PTR_ERR(inode);
1925 if (IS_ERR(inode))
1926 goto out_unlock;
1927
1928 btrfs_set_trans_block_group(trans, inode);
1929 err = btrfs_add_nondir(trans, dentry, inode, 0);
1930 if (err)
1931 drop_inode = 1;
1932 else {
1933 inode->i_mapping->a_ops = &btrfs_aops;
1934 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
1935 inode->i_fop = &btrfs_file_operations;
1936 inode->i_op = &btrfs_file_inode_operations;
1937 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1938 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1939 inode->i_mapping, GFP_NOFS);
1940 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1941 inode->i_mapping, GFP_NOFS);
1942 BTRFS_I(inode)->delalloc_bytes = 0;
1943 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
1944 }
1945 dir->i_sb->s_dirt = 1;
1946 btrfs_update_inode_block_group(trans, inode);
1947 btrfs_update_inode_block_group(trans, dir);
1948 out_unlock:
1949 nr = trans->blocks_used;
1950 btrfs_end_transaction(trans, root);
1951 fail:
1952 mutex_unlock(&root->fs_info->fs_mutex);
1953
1954 if (drop_inode) {
1955 inode_dec_link_count(inode);
1956 iput(inode);
1957 }
1958 btrfs_btree_balance_dirty(root, nr);
1959 btrfs_throttle(root);
1960 return err;
1961 }
1962
1963 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
1964 struct dentry *dentry)
1965 {
1966 struct btrfs_trans_handle *trans;
1967 struct btrfs_root *root = BTRFS_I(dir)->root;
1968 struct inode *inode = old_dentry->d_inode;
1969 unsigned long nr = 0;
1970 int err;
1971 int drop_inode = 0;
1972
1973 if (inode->i_nlink == 0)
1974 return -ENOENT;
1975
1976 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1977 inode->i_nlink++;
1978 #else
1979 inc_nlink(inode);
1980 #endif
1981 mutex_lock(&root->fs_info->fs_mutex);
1982 err = btrfs_check_free_space(root, 1, 0);
1983 if (err)
1984 goto fail;
1985 trans = btrfs_start_transaction(root, 1);
1986
1987 btrfs_set_trans_block_group(trans, dir);
1988 atomic_inc(&inode->i_count);
1989 err = btrfs_add_nondir(trans, dentry, inode, 1);
1990
1991 if (err)
1992 drop_inode = 1;
1993
1994 dir->i_sb->s_dirt = 1;
1995 btrfs_update_inode_block_group(trans, dir);
1996 err = btrfs_update_inode(trans, root, inode);
1997
1998 if (err)
1999 drop_inode = 1;
2000
2001 nr = trans->blocks_used;
2002 btrfs_end_transaction(trans, root);
2003 fail:
2004 mutex_unlock(&root->fs_info->fs_mutex);
2005
2006 if (drop_inode) {
2007 inode_dec_link_count(inode);
2008 iput(inode);
2009 }
2010 btrfs_btree_balance_dirty(root, nr);
2011 btrfs_throttle(root);
2012 return err;
2013 }
2014
2015 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2016 {
2017 struct inode *inode;
2018 struct btrfs_trans_handle *trans;
2019 struct btrfs_root *root = BTRFS_I(dir)->root;
2020 int err = 0;
2021 int drop_on_err = 0;
2022 u64 objectid;
2023 unsigned long nr = 1;
2024
2025 mutex_lock(&root->fs_info->fs_mutex);
2026 err = btrfs_check_free_space(root, 1, 0);
2027 if (err)
2028 goto out_unlock;
2029
2030 trans = btrfs_start_transaction(root, 1);
2031 if (IS_ERR(trans)) {
2032 err = PTR_ERR(trans);
2033 goto out_unlock;
2034 }
2035
2036 btrfs_set_trans_block_group(trans, dir);
2037
2038 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
2039 if (err) {
2040 err = -ENOSPC;
2041 goto out_unlock;
2042 }
2043
2044 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
2045 dentry->d_name.len,
2046 dentry->d_parent->d_inode->i_ino, objectid,
2047 BTRFS_I(dir)->block_group, S_IFDIR | mode);
2048 if (IS_ERR(inode)) {
2049 err = PTR_ERR(inode);
2050 goto out_fail;
2051 }
2052
2053 drop_on_err = 1;
2054 inode->i_op = &btrfs_dir_inode_operations;
2055 inode->i_fop = &btrfs_dir_file_operations;
2056 btrfs_set_trans_block_group(trans, inode);
2057
2058 inode->i_size = 0;
2059 err = btrfs_update_inode(trans, root, inode);
2060 if (err)
2061 goto out_fail;
2062
2063 err = btrfs_add_link(trans, dentry, inode, 0);
2064 if (err)
2065 goto out_fail;
2066
2067 d_instantiate(dentry, inode);
2068 drop_on_err = 0;
2069 dir->i_sb->s_dirt = 1;
2070 btrfs_update_inode_block_group(trans, inode);
2071 btrfs_update_inode_block_group(trans, dir);
2072
2073 out_fail:
2074 nr = trans->blocks_used;
2075 btrfs_end_transaction(trans, root);
2076
2077 out_unlock:
2078 mutex_unlock(&root->fs_info->fs_mutex);
2079 if (drop_on_err)
2080 iput(inode);
2081 btrfs_btree_balance_dirty(root, nr);
2082 btrfs_throttle(root);
2083 return err;
2084 }
2085
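/*
 * btrfs_get_extent - return the extent_map covering [start, start + len)
 * of an inode.  The in-memory extent map tree is checked first; on a miss
 * the file extent item is looked up in the btree.  Regular extents map to
 * their disk bytenr, zero-bytenr or missing extents become holes, and
 * inline extents are copied into (or, with create set, written back from)
 * the supplied page.  The result is inserted into the extent map tree
 * before it is returned.
 */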
2086 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
2087 size_t pg_offset, u64 start, u64 len,
2088 int create)
2089 {
2090 int ret;
2091 int err = 0;
2092 u64 bytenr;
2093 u64 extent_start = 0;
2094 u64 extent_end = 0;
2095 u64 objectid = inode->i_ino;
2096 u32 found_type;
2097 struct btrfs_path *path;
2098 struct btrfs_root *root = BTRFS_I(inode)->root;
2099 struct btrfs_file_extent_item *item;
2100 struct extent_buffer *leaf;
2101 struct btrfs_key found_key;
2102 struct extent_map *em = NULL;
2103 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2104 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2105 struct btrfs_trans_handle *trans = NULL;
2106
2107 path = btrfs_alloc_path();
2108 BUG_ON(!path);
2109 mutex_lock(&root->fs_info->fs_mutex);
2110
2111 again:
2112 spin_lock(&em_tree->lock);
2113 em = lookup_extent_mapping(em_tree, start, len);
2114 spin_unlock(&em_tree->lock);
2115
2116 if (em) {
2117 if (em->start > start) {
2118 printk("get_extent lookup [%Lu %Lu] em [%Lu %Lu]\n",
2119 start, len, em->start, em->len);
2120 WARN_ON(1);
2121 }
2122 if (em->block_start == EXTENT_MAP_INLINE && page)
2123 free_extent_map(em);
2124 else
2125 goto out;
2126 }
2127 em = alloc_extent_map(GFP_NOFS);
2128 if (!em) {
2129 err = -ENOMEM;
2130 goto out;
2131 }
2132
2133 em->start = EXTENT_MAP_HOLE;
2134 em->len = (u64)-1;
2135 em->bdev = inode->i_sb->s_bdev;
2136 ret = btrfs_lookup_file_extent(trans, root, path,
2137 objectid, start, trans != NULL);
2138 if (ret < 0) {
2139 err = ret;
2140 goto out;
2141 }
2142
2143 if (ret != 0) {
2144 if (path->slots[0] == 0)
2145 goto not_found;
2146 path->slots[0]--;
2147 }
2148
2149 leaf = path->nodes[0];
2150 item = btrfs_item_ptr(leaf, path->slots[0],
2151 struct btrfs_file_extent_item);
2152 /* are we inside the extent that was found? */
2153 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2154 found_type = btrfs_key_type(&found_key);
2155 if (found_key.objectid != objectid ||
2156 found_type != BTRFS_EXTENT_DATA_KEY) {
2157 goto not_found;
2158 }
2159
2160 found_type = btrfs_file_extent_type(leaf, item);
2161 extent_start = found_key.offset;
2162 if (found_type == BTRFS_FILE_EXTENT_REG) {
2163 extent_end = extent_start +
2164 btrfs_file_extent_num_bytes(leaf, item);
2165 err = 0;
2166 if (start < extent_start || start >= extent_end) {
2167 em->start = start;
2168 if (start < extent_start) {
2169 if (start + len <= extent_start)
2170 goto not_found;
2171 em->len = extent_end - extent_start;
2172 } else {
2173 em->len = len;
2174 }
2175 goto not_found_em;
2176 }
2177 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
2178 if (bytenr == 0) {
2179 em->start = extent_start;
2180 em->len = extent_end - extent_start;
2181 em->block_start = EXTENT_MAP_HOLE;
2182 goto insert;
2183 }
2184 bytenr += btrfs_file_extent_offset(leaf, item);
2185 em->block_start = bytenr;
2186 em->start = extent_start;
2187 em->len = extent_end - extent_start;
2188 goto insert;
2189 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
2190 u64 page_start;
2191 unsigned long ptr;
2192 char *map;
2193 size_t size;
2194 size_t extent_offset;
2195 size_t copy_size;
2196
2197 size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
2198 path->slots[0]));
2199 extent_end = (extent_start + size + root->sectorsize - 1) &
2200 ~((u64)root->sectorsize - 1);
2201 if (start < extent_start || start >= extent_end) {
2202 em->start = start;
2203 if (start < extent_start) {
2204 if (start + len <= extent_start)
2205 goto not_found;
2206 em->len = extent_end - extent_start;
2207 } else {
2208 em->len = len;
2209 }
2210 goto not_found_em;
2211 }
2212 em->block_start = EXTENT_MAP_INLINE;
2213
2214 if (!page) {
2215 em->start = extent_start;
2216 em->len = size;
2217 goto out;
2218 }
2219
2220 page_start = page_offset(page) + pg_offset;
2221 extent_offset = page_start - extent_start;
2222 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
2223 size - extent_offset);
2224 em->start = extent_start + extent_offset;
2225 em->len = (copy_size + root->sectorsize - 1) &
2226 ~((u64)root->sectorsize - 1);
2227 map = kmap(page);
2228 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
2229 if (create == 0 && !PageUptodate(page)) {
2230 read_extent_buffer(leaf, map + pg_offset, ptr,
2231 copy_size);
2232 flush_dcache_page(page);
2233 } else if (create && PageUptodate(page)) {
2234 if (!trans) {
2235 kunmap(page);
2236 free_extent_map(em);
2237 em = NULL;
2238 btrfs_release_path(root, path);
2239 trans = btrfs_start_transaction(root, 1);
2240 goto again;
2241 }
2242 write_extent_buffer(leaf, map + pg_offset, ptr,
2243 copy_size);
2244 btrfs_mark_buffer_dirty(leaf);
2245 }
2246 kunmap(page);
2247 set_extent_uptodate(io_tree, em->start,
2248 extent_map_end(em) - 1, GFP_NOFS);
2249 goto insert;
2250 } else {
2251 printk("unknown found_type %d\n", found_type);
2252 WARN_ON(1);
2253 }
2254 not_found:
2255 em->start = start;
2256 em->len = len;
2257 not_found_em:
2258 em->block_start = EXTENT_MAP_HOLE;
2259 insert:
2260 btrfs_release_path(root, path);
2261 if (em->start > start || extent_map_end(em) <= start) {
2262 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
2263 err = -EIO;
2264 goto out;
2265 }
2266
2267 err = 0;
2268 spin_lock(&em_tree->lock);
2269 ret = add_extent_mapping(em_tree, em);
2270 if (ret == -EEXIST) {
2271 free_extent_map(em);
2272 em = lookup_extent_mapping(em_tree, start, len);
2273 if (!em) {
2274 err = -EIO;
2275 printk("failing to insert %Lu %Lu\n", start, len);
2276 }
2277 }
2278 spin_unlock(&em_tree->lock);
2279 out:
2280 btrfs_free_path(path);
2281 if (trans) {
2282 ret = btrfs_end_transaction(trans, root);
2283 if (!err)
2284 err = ret;
2285 }
2286 mutex_unlock(&root->fs_info->fs_mutex);
2287 if (err) {
2288 free_extent_map(em);
2289 WARN_ON(1);
2290 return ERR_PTR(err);
2291 }
2292 return em;
2293 }
2294
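/*
 * Simple get_block callback used for direct I/O reads: resolve the file
 * offset with btrfs_get_extent() and map the logical address to a
 * physical sector with btrfs_map_block().  Holes, delalloc ranges and
 * inline extents are left unmapped; writes are rejected by
 * btrfs_direct_IO() below.
 */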
2295 static int btrfs_get_block(struct inode *inode, sector_t iblock,
2296 struct buffer_head *bh_result, int create)
2297 {
2298 struct extent_map *em;
2299 u64 start = (u64)iblock << inode->i_blkbits;
2300 struct btrfs_multi_bio *multi = NULL;
2301 struct btrfs_root *root = BTRFS_I(inode)->root;
2302 u64 len;
2303 u64 logical;
2304 u64 map_length;
2305 int ret = 0;
2306
2307 em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0);
2308
2309 if (!em || IS_ERR(em))
2310 goto out;
2311
2312 if (em->start > start || em->start + em->len <= start)
2313 goto out;
2314
2315 if (em->block_start == EXTENT_MAP_INLINE) {
2316 ret = -EINVAL;
2317 goto out;
2318 }
2319
2320 if (em->block_start == EXTENT_MAP_HOLE ||
2321 em->block_start == EXTENT_MAP_DELALLOC) {
2322 goto out;
2323 }
2324
2325 len = em->start + em->len - start;
2326 len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
2327
2328 logical = start - em->start;
2329 logical = em->block_start + logical;
2330
2331 map_length = len;
2332 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
2333 logical, &map_length, &multi, 0);
2334 BUG_ON(ret);
2335 bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits;
2336 bh_result->b_size = min(map_length, len);
2337 bh_result->b_bdev = multi->stripes[0].dev->bdev;
2338 set_buffer_mapped(bh_result);
2339 kfree(multi);
2340 out:
2341 free_extent_map(em);
2342 return ret;
2343 }
2344
2345 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
2346 const struct iovec *iov, loff_t offset,
2347 unsigned long nr_segs)
2348 {
2349 struct file *file = iocb->ki_filp;
2350 struct inode *inode = file->f_mapping->host;
2351
2352 if (rw == WRITE)
2353 return -EINVAL;
2354
2355 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
2356 offset, nr_segs, btrfs_get_block, NULL);
2357 }
2358
2359 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
2360 {
2361 return extent_bmap(mapping, iblock, btrfs_get_extent);
2362 }
2363
2364 int btrfs_readpage(struct file *file, struct page *page)
2365 {
2366 struct extent_io_tree *tree;
2367 tree = &BTRFS_I(page->mapping->host)->io_tree;
2368 return extent_read_full_page(tree, page, btrfs_get_extent);
2369 }
2370
2371 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
2372 {
2373 struct extent_io_tree *tree;
2374
2375
2376 if (current->flags & PF_MEMALLOC) {
2377 redirty_page_for_writepage(wbc, page);
2378 unlock_page(page);
2379 return 0;
2380 }
2381 tree = &BTRFS_I(page->mapping->host)->io_tree;
2382 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
2383 }
2384
2385 static int btrfs_writepages(struct address_space *mapping,
2386 struct writeback_control *wbc)
2387 {
2388 struct extent_io_tree *tree;
2389 tree = &BTRFS_I(mapping->host)->io_tree;
2390 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
2391 }
2392
2393 static int
2394 btrfs_readpages(struct file *file, struct address_space *mapping,
2395 struct list_head *pages, unsigned nr_pages)
2396 {
2397 struct extent_io_tree *tree;
2398 tree = &BTRFS_I(mapping->host)->io_tree;
2399 return extent_readpages(tree, mapping, pages, nr_pages,
2400 btrfs_get_extent);
2401 }
2402
2403 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
2404 {
2405 struct extent_io_tree *tree;
2406 struct extent_map_tree *map;
2407 int ret;
2408
2409 tree = &BTRFS_I(page->mapping->host)->io_tree;
2410 map = &BTRFS_I(page->mapping->host)->extent_tree;
2411 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
2412 if (ret == 1) {
2413 ClearPagePrivate(page);
2414 set_page_private(page, 0);
2415 page_cache_release(page);
2416 }
2417 return ret;
2418 }
2419
2420 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
2421 {
2422 struct extent_io_tree *tree;
2423
2424 tree = &BTRFS_I(page->mapping->host)->io_tree;
2425 extent_invalidatepage(tree, page, offset);
2426 btrfs_releasepage(page, GFP_NOFS);
2427 }
2428
2429 /*
2430 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
2431 * called from a page fault handler when a page is first dirtied. Hence we must
2432 * be careful to check for EOF conditions here. We set the page up correctly
2433 * for a written page which means we get ENOSPC checking when writing into
2434 * holes and correct delalloc and unwritten extent mapping on filesystems that
2435 * support these features.
2436 *
2437 * We are not allowed to take the i_mutex here so we have to play games to
2438 * protect against truncate races as the page could now be beyond EOF. Because
2439 * vmtruncate() writes the inode size before removing pages, once we have the
2440 * page lock we can determine safely if the page is beyond EOF. If it is not
2441 * beyond EOF, then the page is guaranteed safe against truncation until we
2442 * unlock the page.
2443 */
2444 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
2445 {
2446 struct inode *inode = fdentry(vma->vm_file)->d_inode;
2447 struct btrfs_root *root = BTRFS_I(inode)->root;
2448 unsigned long end;
2449 loff_t size;
2450 int ret;
2451 u64 page_start;
2452
2453 mutex_lock(&root->fs_info->fs_mutex);
2454 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
2455 mutex_unlock(&root->fs_info->fs_mutex);
2456 if (ret)
2457 goto out;
2458
2459 ret = -EINVAL;
2460
2461 lock_page(page);
2462 wait_on_page_writeback(page);
2463 size = i_size_read(inode);
2464 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2465
2466 if ((page->mapping != inode->i_mapping) ||
2467 (page_start > size)) {
2468 /* page got truncated out from underneath us */
2469 goto out_unlock;
2470 }
2471
2472 /* page is wholly or partially inside EOF */
2473 if (page_start + PAGE_CACHE_SIZE > size)
2474 end = size & ~PAGE_CACHE_MASK;
2475 else
2476 end = PAGE_CACHE_SIZE;
2477
2478 ret = btrfs_cow_one_page(inode, page, end);
2479
2480 out_unlock:
2481 unlock_page(page);
2482 out:
2483 return ret;
2484 }
2485
2486 static void btrfs_truncate(struct inode *inode)
2487 {
2488 struct btrfs_root *root = BTRFS_I(inode)->root;
2489 int ret;
2490 struct btrfs_trans_handle *trans;
2491 unsigned long nr;
2492
2493 if (!S_ISREG(inode->i_mode))
2494 return;
2495 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2496 return;
2497
2498 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2499
2500 mutex_lock(&root->fs_info->fs_mutex);
2501 trans = btrfs_start_transaction(root, 1);
2502 btrfs_set_trans_block_group(trans, inode);
2503
2504 /* FIXME, add redo link to tree so we don't leak on crash */
2505 ret = btrfs_truncate_in_trans(trans, root, inode,
2506 BTRFS_EXTENT_DATA_KEY);
2507 btrfs_update_inode(trans, root, inode);
2508 nr = trans->blocks_used;
2509
2510 ret = btrfs_end_transaction(trans, root);
2511 BUG_ON(ret);
2512 mutex_unlock(&root->fs_info->fs_mutex);
2513 btrfs_btree_balance_dirty(root, nr);
2514 btrfs_throttle(root);
2515 }
2516
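/*
 * create_subvol - create a new, empty subvolume.  A fresh leaf is
 * allocated for the new tree, and its root item, directory item and
 * inode ref are inserted into the tree of tree roots.  After committing,
 * the new root is read back and its first directory inode is created.
 */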
2517 static int noinline create_subvol(struct btrfs_root *root, char *name,
2518 int namelen)
2519 {
2520 struct btrfs_trans_handle *trans;
2521 struct btrfs_key key;
2522 struct btrfs_root_item root_item;
2523 struct btrfs_inode_item *inode_item;
2524 struct extent_buffer *leaf;
2525 struct btrfs_root *new_root = root;
2526 struct inode *inode;
2527 struct inode *dir;
2528 int ret;
2529 int err;
2530 u64 objectid;
2531 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
2532 unsigned long nr = 1;
2533
2534 mutex_lock(&root->fs_info->fs_mutex);
2535 ret = btrfs_check_free_space(root, 1, 0);
2536 if (ret)
2537 goto fail_commit;
2538
2539 trans = btrfs_start_transaction(root, 1);
2540 BUG_ON(!trans);
2541
2542 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2543 0, &objectid);
2544 if (ret)
2545 goto fail;
2546
2547 leaf = __btrfs_alloc_free_block(trans, root, root->leafsize,
2548 objectid, trans->transid, 0, 0,
2549 0, 0);
2550 if (IS_ERR(leaf)) {
2551 ret = PTR_ERR(leaf);
2552 goto fail;
2553 }
2552
2553 btrfs_set_header_nritems(leaf, 0);
2554 btrfs_set_header_level(leaf, 0);
2555 btrfs_set_header_bytenr(leaf, leaf->start);
2556 btrfs_set_header_generation(leaf, trans->transid);
2557 btrfs_set_header_owner(leaf, objectid);
2558
2559 write_extent_buffer(leaf, root->fs_info->fsid,
2560 (unsigned long)btrfs_header_fsid(leaf),
2561 BTRFS_FSID_SIZE);
2562 btrfs_mark_buffer_dirty(leaf);
2563
2564 inode_item = &root_item.inode;
2565 memset(inode_item, 0, sizeof(*inode_item));
2566 inode_item->generation = cpu_to_le64(1);
2567 inode_item->size = cpu_to_le64(3);
2568 inode_item->nlink = cpu_to_le32(1);
2569 inode_item->nblocks = cpu_to_le64(1);
2570 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
2571
2572 btrfs_set_root_bytenr(&root_item, leaf->start);
2573 btrfs_set_root_level(&root_item, 0);
2574 btrfs_set_root_refs(&root_item, 1);
2575 btrfs_set_root_used(&root_item, 0);
2576
2577 memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
2578 root_item.drop_level = 0;
2579
2580 free_extent_buffer(leaf);
2581 leaf = NULL;
2582
2583 btrfs_set_root_dirid(&root_item, new_dirid);
2584
2585 key.objectid = objectid;
2586 key.offset = 1;
2587 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2588 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2589 &root_item);
2590 if (ret)
2591 goto fail;
2592
2593 /*
2594 * insert the directory item
2595 */
2596 key.offset = (u64)-1;
2597 dir = root->fs_info->sb->s_root->d_inode;
2598 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2599 name, namelen, dir->i_ino, &key,
2600 BTRFS_FT_DIR);
2601 if (ret)
2602 goto fail;
2603
2604 ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
2605 name, namelen, objectid,
2606 root->fs_info->sb->s_root->d_inode->i_ino);
2607 if (ret)
2608 goto fail;
2609
2610 ret = btrfs_commit_transaction(trans, root);
2611 if (ret)
2612 goto fail_commit;
2613
2614 new_root = btrfs_read_fs_root(root->fs_info, &key, name, namelen);
2615 BUG_ON(!new_root);
2616
2617 trans = btrfs_start_transaction(new_root, 1);
2618 BUG_ON(!trans);
2619
2620 inode = btrfs_new_inode(trans, new_root, "..", 2, new_dirid,
2621 new_dirid,
2622 BTRFS_I(dir)->block_group, S_IFDIR | 0700);
2623 if (IS_ERR(inode))
2624 goto fail;
2625 inode->i_op = &btrfs_dir_inode_operations;
2626 inode->i_fop = &btrfs_dir_file_operations;
2627 new_root->inode = inode;
2628
2629 ret = btrfs_insert_inode_ref(trans, new_root, "..", 2, new_dirid,
2630 new_dirid);
2631 inode->i_nlink = 1;
2632 inode->i_size = 0;
2633 ret = btrfs_update_inode(trans, new_root, inode);
2634 if (ret)
2635 goto fail;
2636 fail:
2637 nr = trans->blocks_used;
2638 err = btrfs_commit_transaction(trans, new_root);
2639 if (err && !ret)
2640 ret = err;
2641 fail_commit:
2642 mutex_unlock(&root->fs_info->fs_mutex);
2643 btrfs_btree_balance_dirty(root, nr);
2644 btrfs_throttle(root);
2645 return ret;
2646 }
2647
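/*
 * Snapshots are not taken right away; the request is queued on the
 * transaction's pending_snapshots list and the snapshot root is created
 * when that transaction commits.
 */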
2648 static int create_snapshot(struct btrfs_root *root, char *name, int namelen)
2649 {
2650 struct btrfs_pending_snapshot *pending_snapshot;
2651 struct btrfs_trans_handle *trans;
2652 int ret;
2653 int err;
2654 unsigned long nr = 0;
2655
2656 if (!root->ref_cows)
2657 return -EINVAL;
2658
2659 mutex_lock(&root->fs_info->fs_mutex);
2660 ret = btrfs_check_free_space(root, 1, 0);
2661 if (ret)
2662 goto fail_unlock;
2663
2664 pending_snapshot = kmalloc(sizeof(*pending_snapshot), GFP_NOFS);
2665 if (!pending_snapshot) {
2666 ret = -ENOMEM;
2667 goto fail_unlock;
2668 }
2669 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
2670 if (!pending_snapshot->name) {
2671 ret = -ENOMEM;
2672 kfree(pending_snapshot);
2673 goto fail_unlock;
2674 }
2675 memcpy(pending_snapshot->name, name, namelen);
2676 pending_snapshot->name[namelen] = '\0';
2677 trans = btrfs_start_transaction(root, 1);
2678 BUG_ON(!trans);
2679 pending_snapshot->root = root;
2680 list_add(&pending_snapshot->list,
2681 &trans->transaction->pending_snapshots);
2682 ret = btrfs_update_inode(trans, root, root->inode);
2683 err = btrfs_commit_transaction(trans, root);
2684
2685 fail_unlock:
2686 mutex_unlock(&root->fs_info->fs_mutex);
2687 btrfs_btree_balance_dirty(root, nr);
2688 btrfs_throttle(root);
2689 return ret;
2690 }
2691
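/*
 * Start readahead for the range [offset, last_index] and return the next
 * index that still needs readahead, hiding the readahead API change made
 * in 2.6.23.
 */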
2692 unsigned long btrfs_force_ra(struct address_space *mapping,
2693 struct file_ra_state *ra, struct file *file,
2694 pgoff_t offset, pgoff_t last_index)
2695 {
2696 pgoff_t req_size;
2697
2698 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2699 req_size = last_index - offset + 1;
2700 offset = page_cache_readahead(mapping, ra, file, offset, req_size);
2701 return offset;
2702 #else
2703 req_size = min(last_index - offset + 1, (pgoff_t)128);
2704 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
2705 return offset + req_size;
2706 #endif
2707 }
2708
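/*
 * Defragment by pulling every page of the file into the page cache,
 * marking it delalloc and dirtying it, so the next writeback pass
 * reallocates the data in new extents.
 */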
2709 int btrfs_defrag_file(struct file *file)
{
2710 struct inode *inode = fdentry(file)->d_inode;
2711 struct btrfs_root *root = BTRFS_I(inode)->root;
2712 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2713 struct page *page;
2714 unsigned long last_index;
2715 unsigned long ra_index = 0;
2716 u64 page_start;
2717 u64 page_end;
2718 unsigned long i;
2719 int ret;
2720
2721 mutex_lock(&root->fs_info->fs_mutex);
2722 ret = btrfs_check_free_space(root, inode->i_size, 0);
2723 mutex_unlock(&root->fs_info->fs_mutex);
2724 if (ret)
2725 return -ENOSPC;
2726
2727 mutex_lock(&inode->i_mutex);
2728 last_index = inode->i_size >> PAGE_CACHE_SHIFT;
2729 for (i = 0; i <= last_index; i++) {
2730 if (i == ra_index) {
2731 ra_index = btrfs_force_ra(inode->i_mapping,
2732 &file->f_ra,
2733 file, ra_index, last_index);
2734 }
2735 page = grab_cache_page(inode->i_mapping, i);
2736 if (!page)
2737 goto out_unlock;
2738 if (!PageUptodate(page)) {
2739 btrfs_readpage(NULL, page);
2740 lock_page(page);
2741 if (!PageUptodate(page)) {
2742 unlock_page(page);
2743 page_cache_release(page);
2744 goto out_unlock;
2745 }
2746 }
2747 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2748 page_end = page_start + PAGE_CACHE_SIZE - 1;
2749
2750 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2751 set_extent_delalloc(io_tree, page_start,
2752 page_end, GFP_NOFS);
2753
2754 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2755 set_page_dirty(page);
2756 unlock_page(page);
2757 page_cache_release(page);
2758 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
2759 }
2760
2761 out_unlock:
2762 mutex_unlock(&inode->i_mutex);
2763 return 0;
2764 }
2765
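/*
 * Resize ioctl: the argument carries a size string, either "max" or a
 * value optionally prefixed with '+' or '-' for a relative change.  The
 * new size is rounded down to a sector boundary and the extent tree is
 * grown or shrunk accordingly.
 */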
2766 static int btrfs_ioctl_resize(struct btrfs_root *root, void __user *arg)
2767 {
2768 u64 new_size;
2769 u64 old_size;
2770 struct btrfs_ioctl_vol_args *vol_args;
2771 struct btrfs_trans_handle *trans;
2772 char *sizestr;
2773 int ret = 0;
2774 int namelen;
2775 int mod = 0;
2776
2777 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
2778
2779 if (!vol_args)
2780 return -ENOMEM;
2781
2782 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
2783 ret = -EFAULT;
2784 goto out;
2785 }
2786 namelen = strlen(vol_args->name);
2787 if (namelen > BTRFS_VOL_NAME_MAX) {
2788 ret = -EINVAL;
2789 goto out;
2790 }
2791
2792 sizestr = vol_args->name;
2793 if (!strcmp(sizestr, "max"))
2794 new_size = root->fs_info->sb->s_bdev->bd_inode->i_size;
2795 else {
2796 if (sizestr[0] == '-') {
2797 mod = -1;
2798 sizestr++;
2799 } else if (sizestr[0] == '+') {
2800 mod = 1;
2801 sizestr++;
2802 }
2803 new_size = btrfs_parse_size(sizestr);
2804 if (new_size == 0) {
2805 ret = -EINVAL;
2806 goto out;
2807 }
2808 }
2809
2810 mutex_lock(&root->fs_info->fs_mutex);
2811 old_size = btrfs_super_total_bytes(&root->fs_info->super_copy);
2812
2813 if (mod < 0) {
2814 if (new_size > old_size) {
2815 ret = -EINVAL;
2816 goto out_unlock;
2817 }
2818 new_size = old_size - new_size;
2819 } else if (mod > 0) {
2820 new_size = old_size + new_size;
2821 }
2822
2823 if (new_size < 256 * 1024 * 1024) {
2824 ret = -EINVAL;
2825 goto out_unlock;
2826 }
2827 if (new_size > root->fs_info->sb->s_bdev->bd_inode->i_size) {
2828 ret = -EFBIG;
2829 goto out_unlock;
2830 }
2831
2832 do_div(new_size, root->sectorsize);
2833 new_size *= root->sectorsize;
2834
2835 printk("new size is %Lu\n", new_size);
2836 if (new_size > old_size) {
2837 trans = btrfs_start_transaction(root, 1);
2838 ret = btrfs_grow_extent_tree(trans, root, new_size);
2839 btrfs_commit_transaction(trans, root);
2840 } else {
2841 ret = btrfs_shrink_extent_tree(root, new_size);
2842 }
2843
2844 out_unlock:
2845 mutex_unlock(&root->fs_info->fs_mutex);
2846 out:
2847 kfree(vol_args);
2848 return ret;
2849 }
2850
2851 static int noinline btrfs_ioctl_snap_create(struct btrfs_root *root,
2852 void __user *arg)
2853 {
2854 struct btrfs_ioctl_vol_args *vol_args;
2855 struct btrfs_dir_item *di;
2856 struct btrfs_path *path;
2857 u64 root_dirid;
2858 int namelen;
2859 int ret;
2860
2861 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
2862
2863 if (!vol_args)
2864 return -ENOMEM;
2865
2866 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
2867 ret = -EFAULT;
2868 goto out;
2869 }
2870
2871 namelen = strlen(vol_args->name);
2872 if (namelen > BTRFS_VOL_NAME_MAX) {
2873 ret = -EINVAL;
2874 goto out;
2875 }
2876 if (strchr(vol_args->name, '/')) {
2877 ret = -EINVAL;
2878 goto out;
2879 }
2880
2881 path = btrfs_alloc_path();
2882 if (!path) {
2883 ret = -ENOMEM;
2884 goto out;
2885 }
2886
2887 root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
2888 mutex_lock(&root->fs_info->fs_mutex);
2889 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
2890 path, root_dirid,
2891 vol_args->name, namelen, 0);
2892 mutex_unlock(&root->fs_info->fs_mutex);
2893 btrfs_free_path(path);
2894
2895 if (di && !IS_ERR(di)) {
2896 ret = -EEXIST;
2897 goto out;
2898 }
2899
2900 if (IS_ERR(di)) {
2901 ret = PTR_ERR(di);
2902 goto out;
2903 }
2904
2905 if (root == root->fs_info->tree_root)
2906 ret = create_subvol(root, vol_args->name, namelen);
2907 else
2908 ret = create_snapshot(root, vol_args->name, namelen);
2909 out:
2910 kfree(vol_args);
2911 return ret;
2912 }
2913
2914 static int btrfs_ioctl_defrag(struct file *file)
2915 {
2916 struct inode *inode = fdentry(file)->d_inode;
2917 struct btrfs_root *root = BTRFS_I(inode)->root;
2918
2919 switch (inode->i_mode & S_IFMT) {
2920 case S_IFDIR:
2921 mutex_lock(&root->fs_info->fs_mutex);
2922 btrfs_defrag_root(root, 0);
2923 btrfs_defrag_root(root->fs_info->extent_root, 0);
2924 mutex_unlock(&root->fs_info->fs_mutex);
2925 break;
2926 case S_IFREG:
2927 btrfs_defrag_file(file);
2928 break;
2929 }
2930
2931 return 0;
2932 }
2933
2934 long btrfs_ioctl(struct file *file, unsigned int
2935 cmd, unsigned long arg)
2936 {
2937 struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
2938
2939 switch (cmd) {
2940 case BTRFS_IOC_SNAP_CREATE:
2941 return btrfs_ioctl_snap_create(root, (void __user *)arg);
2942 case BTRFS_IOC_DEFRAG:
2943 return btrfs_ioctl_defrag(file);
2944 case BTRFS_IOC_RESIZE:
2945 return btrfs_ioctl_resize(root, (void __user *)arg);
2946 }
2947
2948 return -ENOTTY;
2949 }
2950
2951 /*
2952 * Called inside transaction, so use GFP_NOFS
2953 */
2954 struct inode *btrfs_alloc_inode(struct super_block *sb)
2955 {
2956 struct btrfs_inode *ei;
2957
2958 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
2959 if (!ei)
2960 return NULL;
2961 ei->last_trans = 0;
2962 ei->ordered_trans = 0;
2963 return &ei->vfs_inode;
2964 }
2965
2966 void btrfs_destroy_inode(struct inode *inode)
2967 {
2968 WARN_ON(!list_empty(&inode->i_dentry));
2969 WARN_ON(inode->i_data.nrpages);
2970
2971 btrfs_drop_extent_cache(inode, 0, (u64)-1);
2972 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
2973 }
2974
2975 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
2976 static void init_once(struct kmem_cache * cachep, void *foo)
2977 #else
2978 static void init_once(void * foo, struct kmem_cache * cachep,
2979 unsigned long flags)
2980 #endif
2981 {
2982 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
2983
2984 inode_init_once(&ei->vfs_inode);
2985 }
2986
2987 void btrfs_destroy_cachep(void)
2988 {
2989 if (btrfs_inode_cachep)
2990 kmem_cache_destroy(btrfs_inode_cachep);
2991 if (btrfs_trans_handle_cachep)
2992 kmem_cache_destroy(btrfs_trans_handle_cachep);
2993 if (btrfs_transaction_cachep)
2994 kmem_cache_destroy(btrfs_transaction_cachep);
2995 if (btrfs_bit_radix_cachep)
2996 kmem_cache_destroy(btrfs_bit_radix_cachep);
2997 if (btrfs_path_cachep)
2998 kmem_cache_destroy(btrfs_path_cachep);
2999 }
3000
3001 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
3002 unsigned long extra_flags,
3003 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
3004 void (*ctor)(struct kmem_cache *, void *)
3005 #else
3006 void (*ctor)(void *, struct kmem_cache *,
3007 unsigned long)
3008 #endif
3009 )
3010 {
3011 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
3012 SLAB_MEM_SPREAD | extra_flags), ctor
3013 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
3014 ,NULL
3015 #endif
3016 );
3017 }
3018
3019 int btrfs_init_cachep(void)
3020 {
3021 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
3022 sizeof(struct btrfs_inode),
3023 0, init_once);
3024 if (!btrfs_inode_cachep)
3025 goto fail;
3026 btrfs_trans_handle_cachep =
3027 btrfs_cache_create("btrfs_trans_handle_cache",
3028 sizeof(struct btrfs_trans_handle),
3029 0, NULL);
3030 if (!btrfs_trans_handle_cachep)
3031 goto fail;
3032 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
3033 sizeof(struct btrfs_transaction),
3034 0, NULL);
3035 if (!btrfs_transaction_cachep)
3036 goto fail;
3037 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
3038 sizeof(struct btrfs_path),
3039 0, NULL);
3040 if (!btrfs_path_cachep)
3041 goto fail;
3042 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
3043 SLAB_DESTROY_BY_RCU, NULL);
3044 if (!btrfs_bit_radix_cachep)
3045 goto fail;
3046 return 0;
3047 fail:
3048 btrfs_destroy_cachep();
3049 return -ENOMEM;
3050 }
3051
3052 static int btrfs_getattr(struct vfsmount *mnt,
3053 struct dentry *dentry, struct kstat *stat)
3054 {
3055 struct inode *inode = dentry->d_inode;
3056 generic_fillattr(inode, stat);
3057 stat->blksize = PAGE_CACHE_SIZE;
3058 stat->blocks = inode->i_blocks + (BTRFS_I(inode)->delalloc_bytes >> 9);
3059 return 0;
3060 }
3061
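/*
 * Rename is done as an unlink of the old name (and of any existing
 * target) followed by adding a link under the new name, all within a
 * single transaction.
 */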
3062 static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
3063 struct inode * new_dir,struct dentry *new_dentry)
3064 {
3065 struct btrfs_trans_handle *trans;
3066 struct btrfs_root *root = BTRFS_I(old_dir)->root;
3067 struct inode *new_inode = new_dentry->d_inode;
3068 struct inode *old_inode = old_dentry->d_inode;
3069 struct timespec ctime = CURRENT_TIME;
3070 struct btrfs_path *path;
3071 int ret;
3072
3073 if (S_ISDIR(old_inode->i_mode) && new_inode &&
3074 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
3075 return -ENOTEMPTY;
3076 }
3077
3078 mutex_lock(&root->fs_info->fs_mutex);
3079 ret = btrfs_check_free_space(root, 1, 0);
3080 if (ret)
3081 goto out_unlock;
3082
3083 trans = btrfs_start_transaction(root, 1);
3084
3085 btrfs_set_trans_block_group(trans, new_dir);
3086 path = btrfs_alloc_path();
3087 if (!path) {
3088 ret = -ENOMEM;
3089 goto out_fail;
3090 }
3091
3092 old_dentry->d_inode->i_nlink++;
3093 old_dir->i_ctime = old_dir->i_mtime = ctime;
3094 new_dir->i_ctime = new_dir->i_mtime = ctime;
3095 old_inode->i_ctime = ctime;
3096
3097 ret = btrfs_unlink_trans(trans, root, old_dir, old_dentry);
3098 if (ret)
3099 goto out_fail;
3100
3101 if (new_inode) {
3102 new_inode->i_ctime = CURRENT_TIME;
3103 ret = btrfs_unlink_trans(trans, root, new_dir, new_dentry);
3104 if (ret)
3105 goto out_fail;
3106 }
3107 ret = btrfs_add_link(trans, new_dentry, old_inode, 1);
3108 if (ret)
3109 goto out_fail;
3110
3111 out_fail:
3112 btrfs_free_path(path);
3113 btrfs_end_transaction(trans, root);
3114 out_unlock:
3115 mutex_unlock(&root->fs_info->fs_mutex);
3116 return ret;
3117 }
3118
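/*
 * Symlink targets are stored as an inline file extent in the tree, so
 * the target length is limited to BTRFS_MAX_INLINE_DATA_SIZE(root).
 */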
3119 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
3120 const char *symname)
3121 {
3122 struct btrfs_trans_handle *trans;
3123 struct btrfs_root *root = BTRFS_I(dir)->root;
3124 struct btrfs_path *path;
3125 struct btrfs_key key;
3126 struct inode *inode = NULL;
3127 int err;
3128 int drop_inode = 0;
3129 u64 objectid;
3130 int name_len;
3131 int datasize;
3132 unsigned long ptr;
3133 struct btrfs_file_extent_item *ei;
3134 struct extent_buffer *leaf;
3135 unsigned long nr = 0;
3136
3137 name_len = strlen(symname) + 1;
3138 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
3139 return -ENAMETOOLONG;
3140
3141 mutex_lock(&root->fs_info->fs_mutex);
3142 err = btrfs_check_free_space(root, 1, 0);
3143 if (err)
3144 goto out_fail;
3145
3146 trans = btrfs_start_transaction(root, 1);
3147 btrfs_set_trans_block_group(trans, dir);
3148
3149 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3150 if (err) {
3151 err = -ENOSPC;
3152 goto out_unlock;
3153 }
3154
3155 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
3156 dentry->d_name.len,
3157 dentry->d_parent->d_inode->i_ino, objectid,
3158 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO);
3159 err = PTR_ERR(inode);
3160 if (IS_ERR(inode))
3161 goto out_unlock;
3162
3163 btrfs_set_trans_block_group(trans, inode);
3164 err = btrfs_add_nondir(trans, dentry, inode, 0);
3165 if (err)
3166 drop_inode = 1;
3167 else {
3168 inode->i_mapping->a_ops = &btrfs_aops;
3169 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3170 inode->i_fop = &btrfs_file_operations;
3171 inode->i_op = &btrfs_file_inode_operations;
3172 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3173 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3174 inode->i_mapping, GFP_NOFS);
3175 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3176 inode->i_mapping, GFP_NOFS);
3177 BTRFS_I(inode)->delalloc_bytes = 0;
3178 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3179 }
3180 dir->i_sb->s_dirt = 1;
3181 btrfs_update_inode_block_group(trans, inode);
3182 btrfs_update_inode_block_group(trans, dir);
3183 if (drop_inode)
3184 goto out_unlock;
3185
3186 path = btrfs_alloc_path();
3187 BUG_ON(!path);
3188 key.objectid = inode->i_ino;
3189 key.offset = 0;
3190 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
3191 datasize = btrfs_file_extent_calc_inline_size(name_len);
3192 err = btrfs_insert_empty_item(trans, root, path, &key,
3193 datasize);
3194 if (err) {
3195 drop_inode = 1;
3196 goto out_unlock;
3197 }
3198 leaf = path->nodes[0];
3199 ei = btrfs_item_ptr(leaf, path->slots[0],
3200 struct btrfs_file_extent_item);
3201 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
3202 btrfs_set_file_extent_type(leaf, ei,
3203 BTRFS_FILE_EXTENT_INLINE);
3204 ptr = btrfs_file_extent_inline_start(ei);
3205 write_extent_buffer(leaf, symname, ptr, name_len);
3206 btrfs_mark_buffer_dirty(leaf);
3207 btrfs_free_path(path);
3208
3209 inode->i_op = &btrfs_symlink_inode_operations;
3210 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3211 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3212 inode->i_size = name_len - 1;
3213 err = btrfs_update_inode(trans, root, inode);
3214 if (err)
3215 drop_inode = 1;
3216
3217 out_unlock:
3218 nr = trans->blocks_used;
3219 btrfs_end_transaction(trans, root);
3220 out_fail:
3221 mutex_unlock(&root->fs_info->fs_mutex);
3222 if (drop_inode) {
3223 inode_dec_link_count(inode);
3224 iput(inode);
3225 }
3226 btrfs_btree_balance_dirty(root, nr);
3227 btrfs_throttle(root);
3228 return err;
3229 }
3230
3231 static int btrfs_permission(struct inode *inode, int mask,
3232 struct nameidata *nd)
3233 {
3234 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
3235 return -EACCES;
3236 return generic_permission(inode, mask, NULL);
3237 }
3238
3239 static struct inode_operations btrfs_dir_inode_operations = {
3240 .lookup = btrfs_lookup,
3241 .create = btrfs_create,
3242 .unlink = btrfs_unlink,
3243 .link = btrfs_link,
3244 .mkdir = btrfs_mkdir,
3245 .rmdir = btrfs_rmdir,
3246 .rename = btrfs_rename,
3247 .symlink = btrfs_symlink,
3248 .setattr = btrfs_setattr,
3249 .mknod = btrfs_mknod,
3250 .setxattr = generic_setxattr,
3251 .getxattr = generic_getxattr,
3252 .listxattr = btrfs_listxattr,
3253 .removexattr = generic_removexattr,
3254 .permission = btrfs_permission,
3255 };
3256 static struct inode_operations btrfs_dir_ro_inode_operations = {
3257 .lookup = btrfs_lookup,
3258 .permission = btrfs_permission,
3259 };
3260 static struct file_operations btrfs_dir_file_operations = {
3261 .llseek = generic_file_llseek,
3262 .read = generic_read_dir,
3263 .readdir = btrfs_readdir,
3264 .unlocked_ioctl = btrfs_ioctl,
3265 #ifdef CONFIG_COMPAT
3266 .compat_ioctl = btrfs_ioctl,
3267 #endif
3268 };
3269
3270 static struct extent_io_ops btrfs_extent_io_ops = {
3271 .fill_delalloc = run_delalloc_range,
3272 .submit_bio_hook = btrfs_submit_bio_hook,
3273 .merge_bio_hook = btrfs_merge_bio_hook,
3274 .readpage_io_hook = btrfs_readpage_io_hook,
3275 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
3276 .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
3277 .set_bit_hook = btrfs_set_bit_hook,
3278 .clear_bit_hook = btrfs_clear_bit_hook,
3279 };
3280
3281 static struct address_space_operations btrfs_aops = {
3282 .readpage = btrfs_readpage,
3283 .writepage = btrfs_writepage,
3284 .writepages = btrfs_writepages,
3285 .readpages = btrfs_readpages,
3286 .sync_page = block_sync_page,
3287 .bmap = btrfs_bmap,
3288 .direct_IO = btrfs_direct_IO,
3289 .invalidatepage = btrfs_invalidatepage,
3290 .releasepage = btrfs_releasepage,
3291 .set_page_dirty = __set_page_dirty_nobuffers,
3292 };
3293
3294 static struct address_space_operations btrfs_symlink_aops = {
3295 .readpage = btrfs_readpage,
3296 .writepage = btrfs_writepage,
3297 .invalidatepage = btrfs_invalidatepage,
3298 .releasepage = btrfs_releasepage,
3299 };
3300
3301 static struct inode_operations btrfs_file_inode_operations = {
3302 .truncate = btrfs_truncate,
3303 .getattr = btrfs_getattr,
3304 .setattr = btrfs_setattr,
3305 .setxattr = generic_setxattr,
3306 .getxattr = generic_getxattr,
3307 .listxattr = btrfs_listxattr,
3308 .removexattr = generic_removexattr,
3309 .permission = btrfs_permission,
3310 };
3311 static struct inode_operations btrfs_special_inode_operations = {
3312 .getattr = btrfs_getattr,
3313 .setattr = btrfs_setattr,
3314 .permission = btrfs_permission,
3315 };
3316 static struct inode_operations btrfs_symlink_inode_operations = {
3317 .readlink = generic_readlink,
3318 .follow_link = page_follow_link_light,
3319 .put_link = page_put_link,
3320 .permission = btrfs_permission,
3321 };