Fix btrfs_del_ordered_inode to allow forcing the drop during unlinks
fs/btrfs/inode.c (deliverable/linux.git)
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/version.h>
38 #include <linux/xattr.h>
39 #include "ctree.h"
40 #include "disk-io.h"
41 #include "transaction.h"
42 #include "btrfs_inode.h"
43 #include "ioctl.h"
44 #include "print-tree.h"
45 #include "volumes.h"
46
47 struct btrfs_iget_args {
48 u64 ino;
49 struct btrfs_root *root;
50 };
51
52 static struct inode_operations btrfs_dir_inode_operations;
53 static struct inode_operations btrfs_symlink_inode_operations;
54 static struct inode_operations btrfs_dir_ro_inode_operations;
55 static struct inode_operations btrfs_special_inode_operations;
56 static struct inode_operations btrfs_file_inode_operations;
57 static struct address_space_operations btrfs_aops;
58 static struct address_space_operations btrfs_symlink_aops;
59 static struct file_operations btrfs_dir_file_operations;
60 static struct extent_io_ops btrfs_extent_io_ops;
61
62 static struct kmem_cache *btrfs_inode_cachep;
63 struct kmem_cache *btrfs_trans_handle_cachep;
64 struct kmem_cache *btrfs_transaction_cachep;
65 struct kmem_cache *btrfs_bit_radix_cachep;
66 struct kmem_cache *btrfs_path_cachep;
67
68 #define S_SHIFT 12
69 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
70 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
71 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
72 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
73 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
74 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
75 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
76 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
77 };
78
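/*
 * rough check used to decide whether an operation may proceed without
 * running the filesystem out of space.  Deletes are allowed to push
 * closer to full (90% of total bytes) than other operations (85%) so
 * space can still be freed on a nearly full FS.  Nothing is reserved
 * here; the check only reads the super and delalloc counters.
 */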
79 int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
80 int for_del)
81 {
82 u64 total;
83 u64 used;
84 u64 thresh;
85 unsigned long flags;
86 int ret = 0;
87
88 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
89 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
90 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
91 if (for_del)
92 thresh = total * 90;
93 else
94 thresh = total * 85;
95
96 do_div(thresh, 100);
97
98 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
99 ret = -ENOSPC;
100 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
101 return ret;
102 }
103
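/*
 * allocate on-disk extents for a delalloc range.  Existing extents in
 * the range are dropped first, then new extents (at most max_extent
 * bytes each) are allocated and file extent items inserted to point
 * at them.  Inline extents are left alone.  Everything happens inside
 * a single transaction started and ended here.
 */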
104 static int cow_file_range(struct inode *inode, u64 start, u64 end)
105 {
106 struct btrfs_root *root = BTRFS_I(inode)->root;
107 struct btrfs_trans_handle *trans;
108 u64 alloc_hint = 0;
109 u64 num_bytes;
110 u64 cur_alloc_size;
111 u64 blocksize = root->sectorsize;
112 u64 orig_start = start;
113 u64 orig_num_bytes;
114 struct btrfs_key ins;
115 int ret;
116
117 trans = btrfs_start_transaction(root, 1);
118 BUG_ON(!trans);
119 btrfs_set_trans_block_group(trans, inode);
120
121 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
122 num_bytes = max(blocksize, num_bytes);
123 ret = btrfs_drop_extents(trans, root, inode,
124 start, start + num_bytes, start, &alloc_hint);
125 orig_num_bytes = num_bytes;
126
127 if (alloc_hint == EXTENT_MAP_INLINE)
128 goto out;
129
130 BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy));
131
132 while(num_bytes > 0) {
133 cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
134 ret = btrfs_alloc_extent(trans, root, cur_alloc_size,
135 root->sectorsize,
136 root->root_key.objectid,
137 trans->transid,
138 inode->i_ino, start, 0,
139 alloc_hint, (u64)-1, &ins, 1);
140 if (ret) {
141 WARN_ON(1);
142 goto out;
143 }
144 cur_alloc_size = ins.offset;
145 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
146 start, ins.objectid, ins.offset,
147 ins.offset, 0);
148 inode->i_blocks += ins.offset >> 9;
149 btrfs_check_file(root, inode);
150 if (num_bytes < cur_alloc_size) {
151 printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes,
152 cur_alloc_size);
153 break;
154 }
155 num_bytes -= cur_alloc_size;
156 alloc_hint = ins.objectid + ins.offset;
157 start += cur_alloc_size;
158 }
159 btrfs_drop_extent_cache(inode, orig_start,
160 orig_start + orig_num_bytes - 1);
161 btrfs_add_ordered_inode(inode);
162 btrfs_update_inode(trans, root, inode);
163 out:
164 btrfs_end_transaction(trans, root);
165 return ret;
166 }
167
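/*
 * nodatacow writeback: try to reuse the file extents already backing
 * the range instead of allocating new ones.  An extent is only kept
 * if it is a regular (non-inline, non-hole) extent, is referenced by
 * exactly one snapshot and sits in a writable block group; anything
 * else falls back to cow_file_range().
 */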
168 static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end)
169 {
170 u64 extent_start;
171 u64 extent_end;
172 u64 bytenr;
173 u64 cow_end;
174 u64 loops = 0;
175 u64 total_fs_bytes;
176 struct btrfs_root *root = BTRFS_I(inode)->root;
177 struct btrfs_block_group_cache *block_group;
178 struct extent_buffer *leaf;
179 int found_type;
180 struct btrfs_path *path;
181 struct btrfs_file_extent_item *item;
182 int ret;
183 int err;
184 struct btrfs_key found_key;
185
186 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
187 path = btrfs_alloc_path();
188 BUG_ON(!path);
189 again:
190 ret = btrfs_lookup_file_extent(NULL, root, path,
191 inode->i_ino, start, 0);
192 if (ret < 0) {
193 btrfs_free_path(path);
194 return ret;
195 }
196
197 cow_end = end;
198 if (ret != 0) {
199 if (path->slots[0] == 0)
200 goto not_found;
201 path->slots[0]--;
202 }
203
204 leaf = path->nodes[0];
205 item = btrfs_item_ptr(leaf, path->slots[0],
206 struct btrfs_file_extent_item);
207
208 /* are we inside the extent that was found? */
209 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
210 found_type = btrfs_key_type(&found_key);
211 if (found_key.objectid != inode->i_ino ||
212 found_type != BTRFS_EXTENT_DATA_KEY)
213 goto not_found;
214
215 found_type = btrfs_file_extent_type(leaf, item);
216 extent_start = found_key.offset;
217 if (found_type == BTRFS_FILE_EXTENT_REG) {
218 u64 extent_num_bytes;
219
220 extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
221 extent_end = extent_start + extent_num_bytes;
222 err = 0;
223
224 if (loops && start != extent_start)
225 goto not_found;
226
227 if (start < extent_start || start >= extent_end)
228 goto not_found;
229
230 cow_end = min(end, extent_end - 1);
231 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
232 if (bytenr == 0)
233 goto not_found;
234
235 if (btrfs_count_snapshots_in_path(root, path, inode->i_ino,
236 bytenr) != 1) {
237 goto not_found;
238 }
239
240 /*
241 * we may be called by the resizer, make sure we're inside
242 * the limits of the FS
243 */
244 block_group = btrfs_lookup_block_group(root->fs_info,
245 bytenr);
246 if (!block_group || block_group->ro)
247 goto not_found;
248
249 start = extent_end;
250 } else {
251 goto not_found;
252 }
253 loop:
254 if (start > end) {
255 btrfs_free_path(path);
256 return 0;
257 }
258 btrfs_release_path(root, path);
259 loops++;
260 goto again;
261
262 not_found:
263 cow_file_range(inode, start, end);
264 start = end + 1;
265 goto loop;
266 }
267
268 static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
269 {
270 struct btrfs_root *root = BTRFS_I(inode)->root;
271 int ret;
272
273 if (btrfs_test_opt(root, NODATACOW) ||
274 btrfs_test_flag(inode, NODATACOW))
275 ret = run_delalloc_nocow(inode, start, end);
276 else
277 ret = cow_file_range(inode, start, end);
278
279 return ret;
280 }
281
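/*
 * extent_io tree hooks that keep the per-inode and per-fs delalloc
 * byte counters in sync as the EXTENT_DELALLOC bit is set and cleared
 * on ranges in the io tree.
 */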
282 int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
283 unsigned long old, unsigned long bits)
284 {
285 unsigned long flags;
286 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
287 struct btrfs_root *root = BTRFS_I(inode)->root;
288 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
289 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
290 root->fs_info->delalloc_bytes += end - start + 1;
291 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
292 }
293 return 0;
294 }
295
296 int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
297 unsigned long old, unsigned long bits)
298 {
299 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
300 struct btrfs_root *root = BTRFS_I(inode)->root;
301 unsigned long flags;
302
303 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
304 if (end - start + 1 > root->fs_info->delalloc_bytes) {
305 printk("warning: delalloc account %Lu %Lu\n",
306 end - start + 1, root->fs_info->delalloc_bytes);
307 root->fs_info->delalloc_bytes = 0;
308 BTRFS_I(inode)->delalloc_bytes = 0;
309 } else {
310 root->fs_info->delalloc_bytes -= end - start + 1;
311 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
312 }
313 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
314 }
315 return 0;
316 }
317
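/*
 * called before a page is added to a bio.  The start of the bio is
 * mapped through the chunk tree; if the mapped length is shorter than
 * the bio plus the new page, return 1 so the caller starts a new bio
 * rather than spanning a chunk/stripe boundary.
 */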
318 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
319 size_t size, struct bio *bio)
320 {
321 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
322 struct btrfs_mapping_tree *map_tree;
323 u64 logical = bio->bi_sector << 9;
324 u64 length = 0;
325 u64 map_length;
326 int ret;
327
328 length = bio->bi_size;
329 map_tree = &root->fs_info->mapping_tree;
330 map_length = length;
331 ret = btrfs_map_block(map_tree, READ, logical,
332 &map_length, NULL, 0);
333
334 if (map_length < length + size) {
335 return 1;
336 }
337 return 0;
338 }
339
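/*
 * worker for checksummed writes: compute the data csums for this bio,
 * record them in the csum tree inside a short transaction, then map
 * the bio down to the devices.  Handed to btrfs_wq_submit_bio below
 * so the work happens in helper threads.
 */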
340 int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
341 int mirror_num)
342 {
343 struct btrfs_root *root = BTRFS_I(inode)->root;
344 struct btrfs_trans_handle *trans;
345 int ret = 0;
346 char *sums = NULL;
347
348 ret = btrfs_csum_one_bio(root, bio, &sums);
349 BUG_ON(ret);
350
351 trans = btrfs_start_transaction(root, 1);
352
353 btrfs_set_trans_block_group(trans, inode);
354 btrfs_csum_file_blocks(trans, root, inode, bio, sums);
355
356 ret = btrfs_end_transaction(trans, root);
357 BUG_ON(ret);
358
359 kfree(sums);
360
361 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
362 }
363
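/*
 * bio submission entry point from the extent_io code.  Reads get an
 * end_io workqueue and are mapped directly; writes on inodes that
 * want checksums are pushed through the async helper above.
 */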
364 int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
365 int mirror_num)
366 {
367 struct btrfs_root *root = BTRFS_I(inode)->root;
368 int ret = 0;
369
370 if (!(rw & (1 << BIO_RW))) {
371 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
372 BUG_ON(ret);
373 goto mapit;
374 }
375
376 if (btrfs_test_opt(root, NODATASUM) ||
377 btrfs_test_flag(inode, NODATASUM)) {
378 goto mapit;
379 }
380
381 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
382 inode, rw, bio, mirror_num,
383 __btrfs_submit_bio_hook);
384 mapit:
385 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
386 }
387
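/*
 * called before reading a data block: look up the stored crc32 for
 * this offset and stash it in the io_tree private field so the end_io
 * hook can verify the data.  A missing csum is treated as zero.
 */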
388 int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
389 {
390 int ret = 0;
391 struct inode *inode = page->mapping->host;
392 struct btrfs_root *root = BTRFS_I(inode)->root;
393 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
394 struct btrfs_csum_item *item;
395 struct btrfs_path *path = NULL;
396 u32 csum;
397
398 if (btrfs_test_opt(root, NODATASUM) ||
399 btrfs_test_flag(inode, NODATASUM))
400 return 0;
401
402 path = btrfs_alloc_path();
403 item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
404 if (IS_ERR(item)) {
405 ret = PTR_ERR(item);
406 /* a csum that isn't present is a preallocated region. */
407 if (ret == -ENOENT || ret == -EFBIG)
408 ret = 0;
409 csum = 0;
410 printk("no csum found for inode %lu start %Lu\n", inode->i_ino, start);
411 goto out;
412 }
413 read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
414 BTRFS_CRC32_SIZE);
415 set_state_private(io_tree, start, csum);
416 out:
417 if (path)
418 btrfs_free_path(path);
419 return ret;
420 }
421
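/*
 * read-repair: when a read fails its checksum or IOs error out, the
 * range is remembered in the io_failure tree and the bio is resent
 * against the next mirror.  Once every copy has been tried (or no
 * extent mapping can be found) the error is returned to the caller.
 */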
422 struct io_failure_record {
423 struct page *page;
424 u64 start;
425 u64 len;
426 u64 logical;
427 int last_mirror;
428 };
429
430 int btrfs_io_failed_hook(struct bio *failed_bio,
431 struct page *page, u64 start, u64 end,
432 struct extent_state *state)
433 {
434 struct io_failure_record *failrec = NULL;
435 u64 private;
436 struct extent_map *em;
437 struct inode *inode = page->mapping->host;
438 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
439 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
440 struct bio *bio;
441 int num_copies;
442 int ret;
443 int rw;
444 u64 logical;
445
446 ret = get_state_private(failure_tree, start, &private);
447 if (ret) {
448 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
449 if (!failrec)
450 return -ENOMEM;
451 failrec->start = start;
452 failrec->len = end - start + 1;
453 failrec->last_mirror = 0;
454
455 spin_lock(&em_tree->lock);
456 em = lookup_extent_mapping(em_tree, start, failrec->len);
457                 if (em && (em->start > start || em->start + em->len < start)) {
458 free_extent_map(em);
459 em = NULL;
460 }
461 spin_unlock(&em_tree->lock);
462
463 if (!em || IS_ERR(em)) {
464 kfree(failrec);
465 return -EIO;
466 }
467 logical = start - em->start;
468 logical = em->block_start + logical;
469 failrec->logical = logical;
470 free_extent_map(em);
471 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
472 EXTENT_DIRTY, GFP_NOFS);
473 set_state_private(failure_tree, start,
474 (u64)(unsigned long)failrec);
475 } else {
476 failrec = (struct io_failure_record *)(unsigned long)private;
477 }
478 num_copies = btrfs_num_copies(
479 &BTRFS_I(inode)->root->fs_info->mapping_tree,
480 failrec->logical, failrec->len);
481 failrec->last_mirror++;
482 if (!state) {
483 spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
484 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
485 failrec->start,
486 EXTENT_LOCKED);
487 if (state && state->start != failrec->start)
488 state = NULL;
489 spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
490 }
491 if (!state || failrec->last_mirror > num_copies) {
492 set_state_private(failure_tree, failrec->start, 0);
493 clear_extent_bits(failure_tree, failrec->start,
494 failrec->start + failrec->len - 1,
495 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
496 kfree(failrec);
497 return -EIO;
498 }
499 bio = bio_alloc(GFP_NOFS, 1);
500 bio->bi_private = state;
501 bio->bi_end_io = failed_bio->bi_end_io;
502 bio->bi_sector = failrec->logical >> 9;
503 bio->bi_bdev = failed_bio->bi_bdev;
504 bio->bi_size = 0;
505 bio_add_page(bio, page, failrec->len, start - page_offset(page));
506 if (failed_bio->bi_rw & (1 << BIO_RW))
507 rw = WRITE;
508 else
509 rw = READ;
510
511 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
512 failrec->last_mirror);
513 return 0;
514 }
515
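/*
 * a read in this range has now passed its checksum; drop any
 * io_failure record covering it so the range is no longer treated as
 * bad.
 */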
516 int btrfs_clean_io_failures(struct inode *inode, u64 start)
517 {
518 u64 private;
519 u64 private_failure;
520 struct io_failure_record *failure;
521 int ret;
522
523 private = 0;
524 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
525 (u64)-1, 1, EXTENT_DIRTY)) {
526 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
527 start, &private_failure);
528 if (ret == 0) {
529 failure = (struct io_failure_record *)(unsigned long)
530 private_failure;
531 set_state_private(&BTRFS_I(inode)->io_failure_tree,
532 failure->start, 0);
533 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
534 failure->start,
535 failure->start + failure->len - 1,
536 EXTENT_DIRTY | EXTENT_LOCKED,
537 GFP_NOFS);
538 kfree(failure);
539 }
540 }
541 return 0;
542 }
543
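/*
 * end_io verification for data reads: recompute the crc32 of the page
 * contents and compare it with the csum stashed by
 * btrfs_readpage_io_hook.  On mismatch the range is filled with a
 * known pattern and -EIO is returned, unless the stored csum was 0
 * (a hole or preallocated block).
 */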
544 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
545 struct extent_state *state)
546 {
547 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
548 struct inode *inode = page->mapping->host;
549 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
550 char *kaddr;
551 u64 private = ~(u32)0;
552 int ret;
553 struct btrfs_root *root = BTRFS_I(inode)->root;
554 u32 csum = ~(u32)0;
555 unsigned long flags;
556
557 if (btrfs_test_opt(root, NODATASUM) ||
558 btrfs_test_flag(inode, NODATASUM))
559 return 0;
560 if (state && state->start == start) {
561 private = state->private;
562 ret = 0;
563 } else {
564 ret = get_state_private(io_tree, start, &private);
565 }
566 local_irq_save(flags);
567 kaddr = kmap_atomic(page, KM_IRQ0);
568 if (ret) {
569 goto zeroit;
570 }
571 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
572 btrfs_csum_final(csum, (char *)&csum);
573 if (csum != private) {
574 goto zeroit;
575 }
576 kunmap_atomic(kaddr, KM_IRQ0);
577 local_irq_restore(flags);
578
579 /* if the io failure tree for this inode is non-empty,
580 * check to see if we've recovered from a failed IO
581 */
582 btrfs_clean_io_failures(inode, start);
583 return 0;
584
585 zeroit:
586 printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
587 page->mapping->host->i_ino, (unsigned long long)start, csum,
588 private);
589 memset(kaddr + offset, 1, end - start + 1);
590 flush_dcache_page(page);
591 kunmap_atomic(kaddr, KM_IRQ0);
592 local_irq_restore(flags);
593 if (private == 0)
594 return 0;
595 return -EIO;
596 }
597
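/*
 * read the inode item for this inode out of the tree and fill in the
 * VFS inode: mode, ownership, size, timestamps, block group hint and
 * the per-type operations tables.  On lookup failure the inode is
 * marked bad.
 */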
598 void btrfs_read_locked_inode(struct inode *inode)
599 {
600 struct btrfs_path *path;
601 struct extent_buffer *leaf;
602 struct btrfs_inode_item *inode_item;
603 struct btrfs_timespec *tspec;
604 struct btrfs_root *root = BTRFS_I(inode)->root;
605 struct btrfs_key location;
606 u64 alloc_group_block;
607 u32 rdev;
608 int ret;
609
610 path = btrfs_alloc_path();
611 BUG_ON(!path);
612 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
613
614 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
615 if (ret)
616 goto make_bad;
617
618 leaf = path->nodes[0];
619 inode_item = btrfs_item_ptr(leaf, path->slots[0],
620 struct btrfs_inode_item);
621
622 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
623 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
624 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
625 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
626 inode->i_size = btrfs_inode_size(leaf, inode_item);
627
628 tspec = btrfs_inode_atime(inode_item);
629 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
630 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
631
632 tspec = btrfs_inode_mtime(inode_item);
633 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
634 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
635
636 tspec = btrfs_inode_ctime(inode_item);
637 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
638 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
639
640 inode->i_blocks = btrfs_inode_nblocks(leaf, inode_item);
641 inode->i_generation = btrfs_inode_generation(leaf, inode_item);
642 inode->i_rdev = 0;
643 rdev = btrfs_inode_rdev(leaf, inode_item);
644
645 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
646 BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
647 alloc_group_block);
648 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
649 if (!BTRFS_I(inode)->block_group) {
650 BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
651 NULL, 0,
652 BTRFS_BLOCK_GROUP_METADATA, 0);
653 }
654 btrfs_free_path(path);
655 inode_item = NULL;
656
657 switch (inode->i_mode & S_IFMT) {
658 case S_IFREG:
659 inode->i_mapping->a_ops = &btrfs_aops;
660 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
661 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
662 inode->i_fop = &btrfs_file_operations;
663 inode->i_op = &btrfs_file_inode_operations;
664 break;
665 case S_IFDIR:
666 inode->i_fop = &btrfs_dir_file_operations;
667 if (root == root->fs_info->tree_root)
668 inode->i_op = &btrfs_dir_ro_inode_operations;
669 else
670 inode->i_op = &btrfs_dir_inode_operations;
671 break;
672 case S_IFLNK:
673 inode->i_op = &btrfs_symlink_inode_operations;
674 inode->i_mapping->a_ops = &btrfs_symlink_aops;
675 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
676 break;
677 default:
678 init_special_inode(inode, inode->i_mode, rdev);
679 break;
680 }
681 return;
682
683 make_bad:
684 btrfs_free_path(path);
685 make_bad_inode(inode);
686 }
687
688 static void fill_inode_item(struct extent_buffer *leaf,
689 struct btrfs_inode_item *item,
690 struct inode *inode)
691 {
692 btrfs_set_inode_uid(leaf, item, inode->i_uid);
693 btrfs_set_inode_gid(leaf, item, inode->i_gid);
694 btrfs_set_inode_size(leaf, item, inode->i_size);
695 btrfs_set_inode_mode(leaf, item, inode->i_mode);
696 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
697
698 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
699 inode->i_atime.tv_sec);
700 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
701 inode->i_atime.tv_nsec);
702
703 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
704 inode->i_mtime.tv_sec);
705 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
706 inode->i_mtime.tv_nsec);
707
708 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
709 inode->i_ctime.tv_sec);
710 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
711 inode->i_ctime.tv_nsec);
712
713 btrfs_set_inode_nblocks(leaf, item, inode->i_blocks);
714 btrfs_set_inode_generation(leaf, item, inode->i_generation);
715 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
716 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
717 btrfs_set_inode_block_group(leaf, item,
718 BTRFS_I(inode)->block_group->key.objectid);
719 }
720
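/*
 * copy the in-memory inode back into its inode item in the tree and
 * mark the leaf dirty.  The caller supplies the running transaction.
 */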
721 int btrfs_update_inode(struct btrfs_trans_handle *trans,
722 struct btrfs_root *root,
723 struct inode *inode)
724 {
725 struct btrfs_inode_item *inode_item;
726 struct btrfs_path *path;
727 struct extent_buffer *leaf;
728 int ret;
729
730 path = btrfs_alloc_path();
731 BUG_ON(!path);
732 ret = btrfs_lookup_inode(trans, root, path,
733 &BTRFS_I(inode)->location, 1);
734 if (ret) {
735 if (ret > 0)
736 ret = -ENOENT;
737 goto failed;
738 }
739
740 leaf = path->nodes[0];
741 inode_item = btrfs_item_ptr(leaf, path->slots[0],
742 struct btrfs_inode_item);
743
744 fill_inode_item(leaf, inode_item, inode);
745 btrfs_mark_buffer_dirty(leaf);
746 btrfs_set_inode_last_trans(trans, inode);
747 ret = 0;
748 failed:
749 btrfs_free_path(path);
750 return ret;
751 }
752
753
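/*
 * remove one name from a directory: delete the dir item, the matching
 * dir index item and the inode back reference, then fix up the
 * directory size and times and drop the victim's link count.  Shared
 * by unlink and rmdir.
 */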
754 static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
755 struct btrfs_root *root,
756 struct inode *dir,
757 struct dentry *dentry)
758 {
759 struct btrfs_path *path;
760 const char *name = dentry->d_name.name;
761 int name_len = dentry->d_name.len;
762 int ret = 0;
763 struct extent_buffer *leaf;
764 struct btrfs_dir_item *di;
765 struct btrfs_key key;
766
767 path = btrfs_alloc_path();
768 if (!path) {
769 ret = -ENOMEM;
770 goto err;
771 }
772
773 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
774 name, name_len, -1);
775 if (IS_ERR(di)) {
776 ret = PTR_ERR(di);
777 goto err;
778 }
779 if (!di) {
780 ret = -ENOENT;
781 goto err;
782 }
783 leaf = path->nodes[0];
784 btrfs_dir_item_key_to_cpu(leaf, di, &key);
785 ret = btrfs_delete_one_dir_name(trans, root, path, di);
786 if (ret)
787 goto err;
788 btrfs_release_path(root, path);
789
790 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
791 key.objectid, name, name_len, -1);
792 if (IS_ERR(di)) {
793 ret = PTR_ERR(di);
794 goto err;
795 }
796 if (!di) {
797 ret = -ENOENT;
798 goto err;
799 }
800 ret = btrfs_delete_one_dir_name(trans, root, path, di);
801 btrfs_release_path(root, path);
802
803 dentry->d_inode->i_ctime = dir->i_ctime;
804 ret = btrfs_del_inode_ref(trans, root, name, name_len,
805 dentry->d_inode->i_ino,
806 dentry->d_parent->d_inode->i_ino);
807 if (ret) {
808 printk("failed to delete reference to %.*s, "
809 "inode %lu parent %lu\n", name_len, name,
810 dentry->d_inode->i_ino,
811 dentry->d_parent->d_inode->i_ino);
812 }
813 err:
814 btrfs_free_path(path);
815 if (!ret) {
816 dir->i_size -= name_len * 2;
817 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
818 btrfs_update_inode(trans, root, dir);
819 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
820 dentry->d_inode->i_nlink--;
821 #else
822 drop_nlink(dentry->d_inode);
823 #endif
824 ret = btrfs_update_inode(trans, root, dentry->d_inode);
825 dir->i_sb->s_dirt = 1;
826 }
827 return ret;
828 }
829
830 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
831 {
832 struct btrfs_root *root;
833 struct btrfs_trans_handle *trans;
834 struct inode *inode = dentry->d_inode;
835 int ret;
836 unsigned long nr = 0;
837
838 root = BTRFS_I(dir)->root;
839
840 ret = btrfs_check_free_space(root, 1, 1);
841 if (ret)
842 goto fail;
843
844 trans = btrfs_start_transaction(root, 1);
845
846 btrfs_set_trans_block_group(trans, dir);
847 ret = btrfs_unlink_trans(trans, root, dir, dentry);
848 nr = trans->blocks_used;
849
850 if (inode->i_nlink == 0) {
851 /* if the inode isn't linked anywhere,
852 * we don't need to worry about
853 * data=ordered
854 */
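/*
 * the second argument is the force flag named in the commit subject:
 * on unlink the ordered inode entry is dropped unconditionally, since
 * there is no need to wait for data=ordered writeback on an inode
 * with no remaining links.
 */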
855 btrfs_del_ordered_inode(inode, 1);
856 }
857
858 btrfs_end_transaction(trans, root);
859 fail:
860 btrfs_btree_balance_dirty(root, nr);
861 btrfs_throttle(root);
862 return ret;
863 }
864
865 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
866 {
867 struct inode *inode = dentry->d_inode;
868 int err = 0;
869 int ret;
870 struct btrfs_root *root = BTRFS_I(dir)->root;
871 struct btrfs_trans_handle *trans;
872 unsigned long nr = 0;
873
874 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
875 return -ENOTEMPTY;
876 }
877
878 ret = btrfs_check_free_space(root, 1, 1);
879 if (ret)
880 goto fail;
881
882 trans = btrfs_start_transaction(root, 1);
883 btrfs_set_trans_block_group(trans, dir);
884
885 /* now the directory is empty */
886 err = btrfs_unlink_trans(trans, root, dir, dentry);
887 if (!err) {
888 inode->i_size = 0;
889 }
890
891 nr = trans->blocks_used;
892 ret = btrfs_end_transaction(trans, root);
893 fail:
894 btrfs_btree_balance_dirty(root, nr);
895 btrfs_throttle(root);
896
897 if (ret && !err)
898 err = ret;
899 return err;
900 }
901
902 /*
903 * this can truncate away extent items, csum items and directory items.
904 * It starts at a high offset and removes keys until it can't find
905 * any higher than i_size.
906 *
907 * csum items that cross the new i_size are truncated to the new size
908 * as well.
909 */
910 static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
911 struct btrfs_root *root,
912 struct inode *inode,
913 u32 min_type)
914 {
915 int ret;
916 struct btrfs_path *path;
917 struct btrfs_key key;
918 struct btrfs_key found_key;
919 u32 found_type;
920 struct extent_buffer *leaf;
921 struct btrfs_file_extent_item *fi;
922 u64 extent_start = 0;
923 u64 extent_num_bytes = 0;
924 u64 item_end = 0;
925 u64 root_gen = 0;
926 u64 root_owner = 0;
927 int found_extent;
928 int del_item;
929 int pending_del_nr = 0;
930 int pending_del_slot = 0;
931 int extent_type = -1;
932 u64 mask = root->sectorsize - 1;
933
934 btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1);
935 path = btrfs_alloc_path();
936 path->reada = -1;
937 BUG_ON(!path);
938
939 /* FIXME, add redo link to tree so we don't leak on crash */
940 key.objectid = inode->i_ino;
941 key.offset = (u64)-1;
942 key.type = (u8)-1;
943
944 btrfs_init_path(path);
945 search_again:
946 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
947 if (ret < 0) {
948 goto error;
949 }
950 if (ret > 0) {
951 BUG_ON(path->slots[0] == 0);
952 path->slots[0]--;
953 }
954
955 while(1) {
956 fi = NULL;
957 leaf = path->nodes[0];
958 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
959 found_type = btrfs_key_type(&found_key);
960
961 if (found_key.objectid != inode->i_ino)
962 break;
963
964 if (found_type < min_type)
965 break;
966
967 item_end = found_key.offset;
968 if (found_type == BTRFS_EXTENT_DATA_KEY) {
969 fi = btrfs_item_ptr(leaf, path->slots[0],
970 struct btrfs_file_extent_item);
971 extent_type = btrfs_file_extent_type(leaf, fi);
972 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
973 item_end +=
974 btrfs_file_extent_num_bytes(leaf, fi);
975 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
976 struct btrfs_item *item = btrfs_item_nr(leaf,
977 path->slots[0]);
978 item_end += btrfs_file_extent_inline_len(leaf,
979 item);
980 }
981 item_end--;
982 }
983 if (found_type == BTRFS_CSUM_ITEM_KEY) {
984 ret = btrfs_csum_truncate(trans, root, path,
985 inode->i_size);
986 BUG_ON(ret);
987 }
988 if (item_end < inode->i_size) {
989 if (found_type == BTRFS_DIR_ITEM_KEY) {
990 found_type = BTRFS_INODE_ITEM_KEY;
991 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
992 found_type = BTRFS_CSUM_ITEM_KEY;
993 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
994 found_type = BTRFS_XATTR_ITEM_KEY;
995 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
996 found_type = BTRFS_INODE_REF_KEY;
997 } else if (found_type) {
998 found_type--;
999 } else {
1000 break;
1001 }
1002 btrfs_set_key_type(&key, found_type);
1003 goto next;
1004 }
1005 if (found_key.offset >= inode->i_size)
1006 del_item = 1;
1007 else
1008 del_item = 0;
1009 found_extent = 0;
1010
1011 /* FIXME, shrink the extent if the ref count is only 1 */
1012 if (found_type != BTRFS_EXTENT_DATA_KEY)
1013 goto delete;
1014
1015 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
1016 u64 num_dec;
1017 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
1018 if (!del_item) {
1019 u64 orig_num_bytes =
1020 btrfs_file_extent_num_bytes(leaf, fi);
1021 extent_num_bytes = inode->i_size -
1022 found_key.offset + root->sectorsize - 1;
1023 extent_num_bytes = extent_num_bytes &
1024 ~((u64)root->sectorsize - 1);
1025 btrfs_set_file_extent_num_bytes(leaf, fi,
1026 extent_num_bytes);
1027 num_dec = (orig_num_bytes -
1028 extent_num_bytes);
1029 if (extent_start != 0)
1030 dec_i_blocks(inode, num_dec);
1031 btrfs_mark_buffer_dirty(leaf);
1032 } else {
1033 extent_num_bytes =
1034 btrfs_file_extent_disk_num_bytes(leaf,
1035 fi);
1036 /* FIXME blocksize != 4096 */
1037 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
1038 if (extent_start != 0) {
1039 found_extent = 1;
1040 dec_i_blocks(inode, num_dec);
1041 }
1042 root_gen = btrfs_header_generation(leaf);
1043 root_owner = btrfs_header_owner(leaf);
1044 }
1045 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1046 if (!del_item) {
1047 u32 newsize = inode->i_size - found_key.offset;
1048 dec_i_blocks(inode, item_end + 1 -
1049 found_key.offset - newsize);
1050 newsize =
1051 btrfs_file_extent_calc_inline_size(newsize);
1052 ret = btrfs_truncate_item(trans, root, path,
1053 newsize, 1);
1054 BUG_ON(ret);
1055 } else {
1056 dec_i_blocks(inode, item_end + 1 -
1057 found_key.offset);
1058 }
1059 }
1060 delete:
1061 if (del_item) {
1062 if (!pending_del_nr) {
1063 /* no pending yet, add ourselves */
1064 pending_del_slot = path->slots[0];
1065 pending_del_nr = 1;
1066 } else if (pending_del_nr &&
1067 path->slots[0] + 1 == pending_del_slot) {
1068 /* hop on the pending chunk */
1069 pending_del_nr++;
1070 pending_del_slot = path->slots[0];
1071 } else {
1072 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
1073 }
1074 } else {
1075 break;
1076 }
1077 if (found_extent) {
1078 ret = btrfs_free_extent(trans, root, extent_start,
1079 extent_num_bytes,
1080 root_owner,
1081 root_gen, inode->i_ino,
1082 found_key.offset, 0);
1083 BUG_ON(ret);
1084 }
1085 next:
1086 if (path->slots[0] == 0) {
1087 if (pending_del_nr)
1088 goto del_pending;
1089 btrfs_release_path(root, path);
1090 goto search_again;
1091 }
1092
1093 path->slots[0]--;
1094 if (pending_del_nr &&
1095 path->slots[0] + 1 != pending_del_slot) {
1096 struct btrfs_key debug;
1097 del_pending:
1098 btrfs_item_key_to_cpu(path->nodes[0], &debug,
1099 pending_del_slot);
1100 ret = btrfs_del_items(trans, root, path,
1101 pending_del_slot,
1102 pending_del_nr);
1103 BUG_ON(ret);
1104 pending_del_nr = 0;
1105 btrfs_release_path(root, path);
1106 goto search_again;
1107 }
1108 }
1109 ret = 0;
1110 error:
1111 if (pending_del_nr) {
1112 ret = btrfs_del_items(trans, root, path, pending_del_slot,
1113 pending_del_nr);
1114 }
1115 btrfs_free_path(path);
1116 inode->i_sb->s_dirt = 1;
1117 return ret;
1118 }
1119
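/*
 * mark a single page delalloc so it will be allocated/COWed at
 * writeback time, and zero the page from zero_start to the end.  Used
 * by btrfs_truncate_page for the partial page at the new EOF.
 */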
1120 static int btrfs_cow_one_page(struct inode *inode, struct page *page,
1121 size_t zero_start)
1122 {
1123 char *kaddr;
1124 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1125 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1126 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1127 int ret = 0;
1128
1129 WARN_ON(!PageLocked(page));
1130 set_page_extent_mapped(page);
1131
1132 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
1133 set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
1134 page_end, GFP_NOFS);
1135
1136 if (zero_start != PAGE_CACHE_SIZE) {
1137 kaddr = kmap(page);
1138 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
1139 flush_dcache_page(page);
1140 kunmap(page);
1141 }
1142 set_page_dirty(page);
1143 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
1144
1145 return ret;
1146 }
1147
1148 /*
1149 * taken from block_truncate_page, but does cow as it zeros out
1150 * any bytes left in the last page in the file.
1151 */
1152 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
1153 {
1154 struct inode *inode = mapping->host;
1155 struct btrfs_root *root = BTRFS_I(inode)->root;
1156 u32 blocksize = root->sectorsize;
1157 pgoff_t index = from >> PAGE_CACHE_SHIFT;
1158 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1159 struct page *page;
1160 int ret = 0;
1161 u64 page_start;
1162
1163 if ((offset & (blocksize - 1)) == 0)
1164 goto out;
1165
1166 ret = -ENOMEM;
1167 again:
1168 page = grab_cache_page(mapping, index);
1169 if (!page)
1170 goto out;
1171 if (!PageUptodate(page)) {
1172 ret = btrfs_readpage(NULL, page);
1173 lock_page(page);
1174 if (page->mapping != mapping) {
1175 unlock_page(page);
1176 page_cache_release(page);
1177 goto again;
1178 }
1179 if (!PageUptodate(page)) {
1180 ret = -EIO;
1181 goto out;
1182 }
1183 }
1184
1185 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1186 wait_on_page_writeback(page);
1187 ret = btrfs_cow_one_page(inode, page, offset);
1188
1189 unlock_page(page);
1190 page_cache_release(page);
1191 out:
1192 return ret;
1193 }
1194
1195 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
1196 {
1197 struct inode *inode = dentry->d_inode;
1198 int err;
1199
1200 err = inode_change_ok(inode, attr);
1201 if (err)
1202 return err;
1203
1204 if (S_ISREG(inode->i_mode) &&
1205 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
1206 struct btrfs_trans_handle *trans;
1207 struct btrfs_root *root = BTRFS_I(inode)->root;
1208 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1209
1210 u64 mask = root->sectorsize - 1;
1211 u64 hole_start = (inode->i_size + mask) & ~mask;
1212 u64 block_end = (attr->ia_size + mask) & ~mask;
1213 u64 hole_size;
1214 u64 alloc_hint = 0;
1215
1216 if (attr->ia_size <= hole_start)
1217 goto out;
1218
1219 err = btrfs_check_free_space(root, 1, 0);
1220 if (err)
1221 goto fail;
1222
1223 btrfs_truncate_page(inode->i_mapping, inode->i_size);
1224
1225 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1226 hole_size = block_end - hole_start;
1227
1228 trans = btrfs_start_transaction(root, 1);
1229 btrfs_set_trans_block_group(trans, inode);
1230 err = btrfs_drop_extents(trans, root, inode,
1231 hole_start, block_end, hole_start,
1232 &alloc_hint);
1233
1234 if (alloc_hint != EXTENT_MAP_INLINE) {
1235 err = btrfs_insert_file_extent(trans, root,
1236 inode->i_ino,
1237 hole_start, 0, 0,
1238 hole_size, 0);
1239 btrfs_drop_extent_cache(inode, hole_start,
1240 (u64)-1);
1241 btrfs_check_file(root, inode);
1242 }
1243 btrfs_end_transaction(trans, root);
1244 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1245 if (err)
1246 return err;
1247 }
1248 out:
1249 err = inode_setattr(inode, attr);
1250 fail:
1251 return err;
1252 }
1253
1254 void btrfs_delete_inode(struct inode *inode)
1255 {
1256 struct btrfs_trans_handle *trans;
1257 struct btrfs_root *root = BTRFS_I(inode)->root;
1258 unsigned long nr;
1259 int ret;
1260
1261 truncate_inode_pages(&inode->i_data, 0);
1262 if (is_bad_inode(inode)) {
1263 goto no_delete;
1264 }
1265
1266 inode->i_size = 0;
1267 trans = btrfs_start_transaction(root, 1);
1268
1269 btrfs_set_trans_block_group(trans, inode);
1270 ret = btrfs_truncate_in_trans(trans, root, inode, 0);
1271 if (ret)
1272 goto no_delete_lock;
1273
1274 nr = trans->blocks_used;
1275 clear_inode(inode);
1276
1277 btrfs_end_transaction(trans, root);
1278 btrfs_btree_balance_dirty(root, nr);
1279 return;
1280
1281 no_delete_lock:
1282 nr = trans->blocks_used;
1283 btrfs_end_transaction(trans, root);
1284 btrfs_btree_balance_dirty(root, nr);
1285 no_delete:
1286 clear_inode(inode);
1287 }
1288
1289 /*
1290 * this returns the key found in the dir entry in the location pointer.
1291 * If no dir entries were found, location->objectid is 0.
1292 */
1293 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
1294 struct btrfs_key *location)
1295 {
1296 const char *name = dentry->d_name.name;
1297 int namelen = dentry->d_name.len;
1298 struct btrfs_dir_item *di;
1299 struct btrfs_path *path;
1300 struct btrfs_root *root = BTRFS_I(dir)->root;
1301 int ret = 0;
1302
1303 if (namelen == 1 && strcmp(name, ".") == 0) {
1304 location->objectid = dir->i_ino;
1305 location->type = BTRFS_INODE_ITEM_KEY;
1306 location->offset = 0;
1307 return 0;
1308 }
1309 path = btrfs_alloc_path();
1310 BUG_ON(!path);
1311
1312 if (namelen == 2 && strcmp(name, "..") == 0) {
1313 struct btrfs_key key;
1314 struct extent_buffer *leaf;
1315 u32 nritems;
1316 int slot;
1317
1318 key.objectid = dir->i_ino;
1319 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1320 key.offset = 0;
1321 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1322 BUG_ON(ret == 0);
1323 ret = 0;
1324
1325 leaf = path->nodes[0];
1326 slot = path->slots[0];
1327 nritems = btrfs_header_nritems(leaf);
1328 if (slot >= nritems)
1329 goto out_err;
1330
1331 btrfs_item_key_to_cpu(leaf, &key, slot);
1332 if (key.objectid != dir->i_ino ||
1333 key.type != BTRFS_INODE_REF_KEY) {
1334 goto out_err;
1335 }
1336 location->objectid = key.offset;
1337 location->type = BTRFS_INODE_ITEM_KEY;
1338 location->offset = 0;
1339 goto out;
1340 }
1341
1342 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
1343 namelen, 0);
1344 if (IS_ERR(di))
1345 ret = PTR_ERR(di);
1346 if (!di || IS_ERR(di)) {
1347 goto out_err;
1348 }
1349 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
1350 out:
1351 btrfs_free_path(path);
1352 return ret;
1353 out_err:
1354 location->objectid = 0;
1355 goto out;
1356 }
1357
1358 /*
1359 * when we hit a tree root in a directory, the btrfs part of the inode
1360 * needs to be changed to reflect the root directory of the tree root. This
1361 * is kind of like crossing a mount point.
1362 */
1363 static int fixup_tree_root_location(struct btrfs_root *root,
1364 struct btrfs_key *location,
1365 struct btrfs_root **sub_root,
1366 struct dentry *dentry)
1367 {
1368 struct btrfs_path *path;
1369 struct btrfs_root_item *ri;
1370
1371 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
1372 return 0;
1373 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1374 return 0;
1375
1376 path = btrfs_alloc_path();
1377 BUG_ON(!path);
1378
1379 *sub_root = btrfs_read_fs_root(root->fs_info, location,
1380 dentry->d_name.name,
1381 dentry->d_name.len);
1382 if (IS_ERR(*sub_root))
1383 return PTR_ERR(*sub_root);
1384
1385 ri = &(*sub_root)->root_item;
1386 location->objectid = btrfs_root_dirid(ri);
1387 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1388 location->offset = 0;
1389
1390 btrfs_free_path(path);
1391 return 0;
1392 }
1393
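/*
 * inode hash helpers: inodes are looked up by (objectid, root) so the
 * same objectid in different subvolume roots maps to different
 * in-memory inodes.
 */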
1394 static int btrfs_init_locked_inode(struct inode *inode, void *p)
1395 {
1396 struct btrfs_iget_args *args = p;
1397 inode->i_ino = args->ino;
1398 BTRFS_I(inode)->root = args->root;
1399 BTRFS_I(inode)->delalloc_bytes = 0;
1400 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1401 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1402 inode->i_mapping, GFP_NOFS);
1403 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1404 inode->i_mapping, GFP_NOFS);
1405 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
1406 return 0;
1407 }
1408
1409 static int btrfs_find_actor(struct inode *inode, void *opaque)
1410 {
1411 struct btrfs_iget_args *args = opaque;
1412 return (args->ino == inode->i_ino &&
1413 args->root == BTRFS_I(inode)->root);
1414 }
1415
1416 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
1417 u64 root_objectid)
1418 {
1419 struct btrfs_iget_args args;
1420 args.ino = objectid;
1421 args.root = btrfs_lookup_fs_root(btrfs_sb(s)->fs_info, root_objectid);
1422
1423 if (!args.root)
1424 return NULL;
1425
1426 return ilookup5(s, objectid, btrfs_find_actor, (void *)&args);
1427 }
1428
1429 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
1430 struct btrfs_root *root)
1431 {
1432 struct inode *inode;
1433 struct btrfs_iget_args args;
1434 args.ino = objectid;
1435 args.root = root;
1436
1437 inode = iget5_locked(s, objectid, btrfs_find_actor,
1438 btrfs_init_locked_inode,
1439 (void *)&args);
1440 return inode;
1441 }
1442
1443 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
1444 struct nameidata *nd)
1445 {
1446 struct inode * inode;
1447 struct btrfs_inode *bi = BTRFS_I(dir);
1448 struct btrfs_root *root = bi->root;
1449 struct btrfs_root *sub_root = root;
1450 struct btrfs_key location;
1451 int ret;
1452
1453 if (dentry->d_name.len > BTRFS_NAME_LEN)
1454 return ERR_PTR(-ENAMETOOLONG);
1455
1456 ret = btrfs_inode_by_name(dir, dentry, &location);
1457
1458 if (ret < 0)
1459 return ERR_PTR(ret);
1460
1461 inode = NULL;
1462 if (location.objectid) {
1463 ret = fixup_tree_root_location(root, &location, &sub_root,
1464 dentry);
1465 if (ret < 0)
1466 return ERR_PTR(ret);
1467 if (ret > 0)
1468 return ERR_PTR(-ENOENT);
1469 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
1470 sub_root);
1471 if (!inode)
1472 return ERR_PTR(-EACCES);
1473 if (inode->i_state & I_NEW) {
1474 /* the inode and parent dir are two different roots */
1475 if (sub_root != root) {
1476 igrab(inode);
1477 sub_root->inode = inode;
1478 }
1479 BTRFS_I(inode)->root = sub_root;
1480 memcpy(&BTRFS_I(inode)->location, &location,
1481 sizeof(location));
1482 btrfs_read_locked_inode(inode);
1483 unlock_new_inode(inode);
1484 }
1485 }
1486 return d_splice_alias(inode, dentry);
1487 }
1488
1489 static unsigned char btrfs_filetype_table[] = {
1490 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
1491 };
1492
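/*
 * readdir: "." and ".." are synthesized ("." from the inode itself,
 * ".." from the inode back reference); everything else is walked from
 * the DIR_INDEX items (DIR_ITEM for the tree root) starting at f_pos.
 */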
1493 static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1494 {
1495 struct inode *inode = filp->f_dentry->d_inode;
1496 struct btrfs_root *root = BTRFS_I(inode)->root;
1497 struct btrfs_item *item;
1498 struct btrfs_dir_item *di;
1499 struct btrfs_key key;
1500 struct btrfs_key found_key;
1501 struct btrfs_path *path;
1502 int ret;
1503 u32 nritems;
1504 struct extent_buffer *leaf;
1505 int slot;
1506 int advance;
1507 unsigned char d_type;
1508 int over = 0;
1509 u32 di_cur;
1510 u32 di_total;
1511 u32 di_len;
1512 int key_type = BTRFS_DIR_INDEX_KEY;
1513 char tmp_name[32];
1514 char *name_ptr;
1515 int name_len;
1516
1517 /* FIXME, use a real flag for deciding about the key type */
1518 if (root->fs_info->tree_root == root)
1519 key_type = BTRFS_DIR_ITEM_KEY;
1520
1521 /* special case for "." */
1522 if (filp->f_pos == 0) {
1523 over = filldir(dirent, ".", 1,
1524 1, inode->i_ino,
1525 DT_DIR);
1526 if (over)
1527 return 0;
1528 filp->f_pos = 1;
1529 }
1530
1531 key.objectid = inode->i_ino;
1532 path = btrfs_alloc_path();
1533 path->reada = 2;
1534
1535 /* special case for .., just use the back ref */
1536 if (filp->f_pos == 1) {
1537 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1538 key.offset = 0;
1539 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1540 BUG_ON(ret == 0);
1541 leaf = path->nodes[0];
1542 slot = path->slots[0];
1543 nritems = btrfs_header_nritems(leaf);
1544 if (slot >= nritems) {
1545 btrfs_release_path(root, path);
1546 goto read_dir_items;
1547 }
1548 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1549 btrfs_release_path(root, path);
1550 if (found_key.objectid != key.objectid ||
1551 found_key.type != BTRFS_INODE_REF_KEY)
1552 goto read_dir_items;
1553 over = filldir(dirent, "..", 2,
1554 2, found_key.offset, DT_DIR);
1555 if (over)
1556 goto nopos;
1557 filp->f_pos = 2;
1558 }
1559
1560 read_dir_items:
1561 btrfs_set_key_type(&key, key_type);
1562 key.offset = filp->f_pos;
1563
1564 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1565 if (ret < 0)
1566 goto err;
1567 advance = 0;
1568 while(1) {
1569 leaf = path->nodes[0];
1570 nritems = btrfs_header_nritems(leaf);
1571 slot = path->slots[0];
1572 if (advance || slot >= nritems) {
1573 if (slot >= nritems -1) {
1574 ret = btrfs_next_leaf(root, path);
1575 if (ret)
1576 break;
1577 leaf = path->nodes[0];
1578 nritems = btrfs_header_nritems(leaf);
1579 slot = path->slots[0];
1580 } else {
1581 slot++;
1582 path->slots[0]++;
1583 }
1584 }
1585 advance = 1;
1586 item = btrfs_item_nr(leaf, slot);
1587 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1588
1589 if (found_key.objectid != key.objectid)
1590 break;
1591 if (btrfs_key_type(&found_key) != key_type)
1592 break;
1593 if (found_key.offset < filp->f_pos)
1594 continue;
1595
1596 filp->f_pos = found_key.offset;
1597 advance = 1;
1598 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
1599 di_cur = 0;
1600 di_total = btrfs_item_size(leaf, item);
1601 while(di_cur < di_total) {
1602 struct btrfs_key location;
1603
1604 name_len = btrfs_dir_name_len(leaf, di);
1605 if (name_len < 32) {
1606 name_ptr = tmp_name;
1607 } else {
1608 name_ptr = kmalloc(name_len, GFP_NOFS);
1609 BUG_ON(!name_ptr);
1610 }
1611 read_extent_buffer(leaf, name_ptr,
1612 (unsigned long)(di + 1), name_len);
1613
1614 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
1615 btrfs_dir_item_key_to_cpu(leaf, di, &location);
1616 over = filldir(dirent, name_ptr, name_len,
1617 found_key.offset,
1618 location.objectid,
1619 d_type);
1620
1621 if (name_ptr != tmp_name)
1622 kfree(name_ptr);
1623
1624 if (over)
1625 goto nopos;
1626 di_len = btrfs_dir_name_len(leaf, di) +
1627 btrfs_dir_data_len(leaf, di) +sizeof(*di);
1628 di_cur += di_len;
1629 di = (struct btrfs_dir_item *)((char *)di + di_len);
1630 }
1631 }
1632 if (key_type == BTRFS_DIR_INDEX_KEY)
1633 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
1634 else
1635 filp->f_pos++;
1636 nopos:
1637 ret = 0;
1638 err:
1639 btrfs_free_path(path);
1640 return ret;
1641 }
1642
1643 int btrfs_write_inode(struct inode *inode, int wait)
1644 {
1645 struct btrfs_root *root = BTRFS_I(inode)->root;
1646 struct btrfs_trans_handle *trans;
1647 int ret = 0;
1648
1649 if (wait) {
1650 trans = btrfs_start_transaction(root, 1);
1651 btrfs_set_trans_block_group(trans, inode);
1652 ret = btrfs_commit_transaction(trans, root);
1653 }
1654 return ret;
1655 }
1656
1657 /*
1658 * This is somewhat expensive, updating the tree every time the
1659 * inode changes. But, it is most likely to find the inode in cache.
1660 * FIXME, needs more benchmarking...there are no reasons other than performance
1661 * to keep or drop this code.
1662 */
1663 void btrfs_dirty_inode(struct inode *inode)
1664 {
1665 struct btrfs_root *root = BTRFS_I(inode)->root;
1666 struct btrfs_trans_handle *trans;
1667
1668 trans = btrfs_start_transaction(root, 1);
1669 btrfs_set_trans_block_group(trans, inode);
1670 btrfs_update_inode(trans, root, inode);
1671 btrfs_end_transaction(trans, root);
1672 }
1673
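/*
 * create a new inode: pick a metadata block group, insert the inode
 * item and the first inode ref (back reference to the parent
 * directory name) in one batch, and set up the in-memory inode and
 * its extent trees.
 */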
1674 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
1675 struct btrfs_root *root,
1676 const char *name, int name_len,
1677 u64 ref_objectid,
1678 u64 objectid,
1679 struct btrfs_block_group_cache *group,
1680 int mode)
1681 {
1682 struct inode *inode;
1683 struct btrfs_inode_item *inode_item;
1684 struct btrfs_block_group_cache *new_inode_group;
1685 struct btrfs_key *location;
1686 struct btrfs_path *path;
1687 struct btrfs_inode_ref *ref;
1688 struct btrfs_key key[2];
1689 u32 sizes[2];
1690 unsigned long ptr;
1691 int ret;
1692 int owner;
1693
1694 path = btrfs_alloc_path();
1695 BUG_ON(!path);
1696
1697 inode = new_inode(root->fs_info->sb);
1698 if (!inode)
1699 return ERR_PTR(-ENOMEM);
1700
1701 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1702 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1703 inode->i_mapping, GFP_NOFS);
1704 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1705 inode->i_mapping, GFP_NOFS);
1706 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
1707 BTRFS_I(inode)->delalloc_bytes = 0;
1708 BTRFS_I(inode)->root = root;
1709
1710 if (mode & S_IFDIR)
1711 owner = 0;
1712 else
1713 owner = 1;
1714 new_inode_group = btrfs_find_block_group(root, group, 0,
1715 BTRFS_BLOCK_GROUP_METADATA, owner);
1716 if (!new_inode_group) {
1717 printk("find_block group failed\n");
1718 new_inode_group = group;
1719 }
1720 BTRFS_I(inode)->block_group = new_inode_group;
1721 BTRFS_I(inode)->flags = 0;
1722
1723 key[0].objectid = objectid;
1724 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
1725 key[0].offset = 0;
1726
1727 key[1].objectid = objectid;
1728 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
1729 key[1].offset = ref_objectid;
1730
1731 sizes[0] = sizeof(struct btrfs_inode_item);
1732 sizes[1] = name_len + sizeof(*ref);
1733
1734 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
1735 if (ret != 0)
1736 goto fail;
1737
1738 if (objectid > root->highest_inode)
1739 root->highest_inode = objectid;
1740
1741 inode->i_uid = current->fsuid;
1742 inode->i_gid = current->fsgid;
1743 inode->i_mode = mode;
1744 inode->i_ino = objectid;
1745 inode->i_blocks = 0;
1746 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1747 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1748 struct btrfs_inode_item);
1749 fill_inode_item(path->nodes[0], inode_item, inode);
1750
1751 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
1752 struct btrfs_inode_ref);
1753 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
1754 ptr = (unsigned long)(ref + 1);
1755 write_extent_buffer(path->nodes[0], name, ptr, name_len);
1756
1757 btrfs_mark_buffer_dirty(path->nodes[0]);
1758 btrfs_free_path(path);
1759
1760 location = &BTRFS_I(inode)->location;
1761 location->objectid = objectid;
1762 location->offset = 0;
1763 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1764
1765 insert_inode_hash(inode);
1766 return inode;
1767 fail:
1768 btrfs_free_path(path);
1769 return ERR_PTR(ret);
1770 }
1771
1772 static inline u8 btrfs_inode_type(struct inode *inode)
1773 {
1774 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
1775 }
1776
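/*
 * link an inode into a directory: insert the dir item (and optionally
 * an inode ref back reference), then bump the directory's size and
 * times.
 */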
1777 static int btrfs_add_link(struct btrfs_trans_handle *trans,
1778 struct dentry *dentry, struct inode *inode,
1779 int add_backref)
1780 {
1781 int ret;
1782 struct btrfs_key key;
1783 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
1784 struct inode *parent_inode;
1785
1786 key.objectid = inode->i_ino;
1787 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1788 key.offset = 0;
1789
1790 ret = btrfs_insert_dir_item(trans, root,
1791 dentry->d_name.name, dentry->d_name.len,
1792 dentry->d_parent->d_inode->i_ino,
1793 &key, btrfs_inode_type(inode));
1794 if (ret == 0) {
1795 if (add_backref) {
1796 ret = btrfs_insert_inode_ref(trans, root,
1797 dentry->d_name.name,
1798 dentry->d_name.len,
1799 inode->i_ino,
1800 dentry->d_parent->d_inode->i_ino);
1801 }
1802 parent_inode = dentry->d_parent->d_inode;
1803 parent_inode->i_size += dentry->d_name.len * 2;
1804 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1805 ret = btrfs_update_inode(trans, root,
1806 dentry->d_parent->d_inode);
1807 }
1808 return ret;
1809 }
1810
1811 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
1812 struct dentry *dentry, struct inode *inode,
1813 int backref)
1814 {
1815 int err = btrfs_add_link(trans, dentry, inode, backref);
1816 if (!err) {
1817 d_instantiate(dentry, inode);
1818 return 0;
1819 }
1820 if (err > 0)
1821 err = -EEXIST;
1822 return err;
1823 }
1824
1825 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
1826 int mode, dev_t rdev)
1827 {
1828 struct btrfs_trans_handle *trans;
1829 struct btrfs_root *root = BTRFS_I(dir)->root;
1830 struct inode *inode = NULL;
1831 int err;
1832 int drop_inode = 0;
1833 u64 objectid;
1834 unsigned long nr = 0;
1835
1836 if (!new_valid_dev(rdev))
1837 return -EINVAL;
1838
1839 err = btrfs_check_free_space(root, 1, 0);
1840 if (err)
1841 goto fail;
1842
1843 trans = btrfs_start_transaction(root, 1);
1844 btrfs_set_trans_block_group(trans, dir);
1845
1846 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1847 if (err) {
1848 err = -ENOSPC;
1849 goto out_unlock;
1850 }
1851
1852 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1853 dentry->d_name.len,
1854 dentry->d_parent->d_inode->i_ino, objectid,
1855 BTRFS_I(dir)->block_group, mode);
1856 err = PTR_ERR(inode);
1857 if (IS_ERR(inode))
1858 goto out_unlock;
1859
1860 btrfs_set_trans_block_group(trans, inode);
1861 err = btrfs_add_nondir(trans, dentry, inode, 0);
1862 if (err)
1863 drop_inode = 1;
1864 else {
1865 inode->i_op = &btrfs_special_inode_operations;
1866 init_special_inode(inode, inode->i_mode, rdev);
1867 btrfs_update_inode(trans, root, inode);
1868 }
1869 dir->i_sb->s_dirt = 1;
1870 btrfs_update_inode_block_group(trans, inode);
1871 btrfs_update_inode_block_group(trans, dir);
1872 out_unlock:
1873 nr = trans->blocks_used;
1874 btrfs_end_transaction(trans, root);
1875 fail:
1876 if (drop_inode) {
1877 inode_dec_link_count(inode);
1878 iput(inode);
1879 }
1880 btrfs_btree_balance_dirty(root, nr);
1881 btrfs_throttle(root);
1882 return err;
1883 }
1884
1885 static int btrfs_create(struct inode *dir, struct dentry *dentry,
1886 int mode, struct nameidata *nd)
1887 {
1888 struct btrfs_trans_handle *trans;
1889 struct btrfs_root *root = BTRFS_I(dir)->root;
1890 struct inode *inode = NULL;
1891 int err;
1892 int drop_inode = 0;
1893 unsigned long nr = 0;
1894 u64 objectid;
1895
1896 err = btrfs_check_free_space(root, 1, 0);
1897 if (err)
1898 goto fail;
1899 trans = btrfs_start_transaction(root, 1);
1900 btrfs_set_trans_block_group(trans, dir);
1901
1902 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1903 if (err) {
1904 err = -ENOSPC;
1905 goto out_unlock;
1906 }
1907
1908 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1909 dentry->d_name.len,
1910 dentry->d_parent->d_inode->i_ino,
1911 objectid, BTRFS_I(dir)->block_group, mode);
1912 err = PTR_ERR(inode);
1913 if (IS_ERR(inode))
1914 goto out_unlock;
1915
1916 btrfs_set_trans_block_group(trans, inode);
1917 err = btrfs_add_nondir(trans, dentry, inode, 0);
1918 if (err)
1919 drop_inode = 1;
1920 else {
1921 inode->i_mapping->a_ops = &btrfs_aops;
1922 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
1923 inode->i_fop = &btrfs_file_operations;
1924 inode->i_op = &btrfs_file_inode_operations;
1925 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1926 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1927 inode->i_mapping, GFP_NOFS);
1928 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1929 inode->i_mapping, GFP_NOFS);
1930 BTRFS_I(inode)->delalloc_bytes = 0;
1931 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
1932 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
1933 }
1934 dir->i_sb->s_dirt = 1;
1935 btrfs_update_inode_block_group(trans, inode);
1936 btrfs_update_inode_block_group(trans, dir);
1937 out_unlock:
1938 nr = trans->blocks_used;
1939 btrfs_end_transaction(trans, root);
1940 fail:
1941 if (drop_inode) {
1942 inode_dec_link_count(inode);
1943 iput(inode);
1944 }
1945 btrfs_btree_balance_dirty(root, nr);
1946 btrfs_throttle(root);
1947 return err;
1948 }
1949
1950 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
1951 struct dentry *dentry)
1952 {
1953 struct btrfs_trans_handle *trans;
1954 struct btrfs_root *root = BTRFS_I(dir)->root;
1955 struct inode *inode = old_dentry->d_inode;
1956 unsigned long nr = 0;
1957 int err;
1958 int drop_inode = 0;
1959
1960 if (inode->i_nlink == 0)
1961 return -ENOENT;
1962
1963 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1964 inode->i_nlink++;
1965 #else
1966 inc_nlink(inode);
1967 #endif
1968 err = btrfs_check_free_space(root, 1, 0);
1969 if (err)
1970 goto fail;
1971 trans = btrfs_start_transaction(root, 1);
1972
1973 btrfs_set_trans_block_group(trans, dir);
1974 atomic_inc(&inode->i_count);
1975 err = btrfs_add_nondir(trans, dentry, inode, 1);
1976
1977 if (err)
1978 drop_inode = 1;
1979
1980 dir->i_sb->s_dirt = 1;
1981 btrfs_update_inode_block_group(trans, dir);
1982 err = btrfs_update_inode(trans, root, inode);
1983
1984 if (err)
1985 drop_inode = 1;
1986
1987 nr = trans->blocks_used;
1988 btrfs_end_transaction(trans, root);
1989 fail:
1990 if (drop_inode) {
1991 inode_dec_link_count(inode);
1992 iput(inode);
1993 }
1994 btrfs_btree_balance_dirty(root, nr);
1995 btrfs_throttle(root);
1996 return err;
1997 }
1998
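/*
 * mkdir: allocate a directory inode, write it out with size zero and
 * link it into the parent before instantiating the dentry.
 */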
1999 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2000 {
2001 struct inode *inode = NULL;
2002 struct btrfs_trans_handle *trans;
2003 struct btrfs_root *root = BTRFS_I(dir)->root;
2004 int err = 0;
2005 int drop_on_err = 0;
2006 u64 objectid = 0;
2007 unsigned long nr = 1;
2008
2009 err = btrfs_check_free_space(root, 1, 0);
2010 if (err)
2011 goto out_unlock;
2012
2013 trans = btrfs_start_transaction(root, 1);
2014 if (IS_ERR(trans)) {
2015 err = PTR_ERR(trans);
2016 goto out_unlock;
2017 }
2018
2019 btrfs_set_trans_block_group(trans, dir);
2020
2021 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
2022 if (err) {
2023 err = -ENOSPC;
2024 goto out_unlock;
2025 }
2026
2027 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
2028 dentry->d_name.len,
2029 dentry->d_parent->d_inode->i_ino, objectid,
2030 BTRFS_I(dir)->block_group, S_IFDIR | mode);
2031 if (IS_ERR(inode)) {
2032 err = PTR_ERR(inode);
2033 goto out_fail;
2034 }
2035
2036 drop_on_err = 1;
2037 inode->i_op = &btrfs_dir_inode_operations;
2038 inode->i_fop = &btrfs_dir_file_operations;
2039 btrfs_set_trans_block_group(trans, inode);
2040
2041 inode->i_size = 0;
2042 err = btrfs_update_inode(trans, root, inode);
2043 if (err)
2044 goto out_fail;
2045
2046 err = btrfs_add_link(trans, dentry, inode, 0);
2047 if (err)
2048 goto out_fail;
2049
2050 d_instantiate(dentry, inode);
2051 drop_on_err = 0;
2052 dir->i_sb->s_dirt = 1;
2053 btrfs_update_inode_block_group(trans, inode);
2054 btrfs_update_inode_block_group(trans, dir);
2055
2056 out_fail:
2057 nr = trans->blocks_used;
2058 btrfs_end_transaction(trans, root);
2059
2060 out_unlock:
2061 if (drop_on_err)
2062 iput(inode);
2063 btrfs_btree_balance_dirty(root, nr);
2064 btrfs_throttle(root);
2065 return err;
2066 }
2067
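/*
 * a new extent map [em] raced with [existing], which is already in the
 * tree.  If the two describe the same physical blocks, drop the existing
 * entry and reinsert em expanded to cover the union of both ranges.
 * Returns -EIO when the mappings cannot be reconciled.
 */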
2068 static int merge_extent_mapping(struct extent_map_tree *em_tree,
2069 struct extent_map *existing,
2070 struct extent_map *em)
2071 {
2072 u64 start_diff;
2073 u64 new_end;
2074 int ret = 0;
2075 int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE;
2076
2077 if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE)
2078 goto invalid;
2079
2080 if (!real_blocks && em->block_start != existing->block_start)
2081 goto invalid;
2082
2083 new_end = max(existing->start + existing->len, em->start + em->len);
2084
2085 if (existing->start >= em->start) {
2086 if (em->start + em->len < existing->start)
2087 goto invalid;
2088
2089 start_diff = existing->start - em->start;
2090 if (real_blocks && em->block_start + start_diff !=
2091 existing->block_start)
2092 goto invalid;
2093
2094 em->len = new_end - em->start;
2095
2096 remove_extent_mapping(em_tree, existing);
2097 /* free for the tree */
2098 free_extent_map(existing);
2099 ret = add_extent_mapping(em_tree, em);
2100
2101 } else if (em->start > existing->start) {
2102
2103 if (existing->start + existing->len < em->start)
2104 goto invalid;
2105
2106 start_diff = em->start - existing->start;
2107 if (real_blocks && existing->block_start + start_diff !=
2108 em->block_start)
2109 goto invalid;
2110
2111 remove_extent_mapping(em_tree, existing);
2112 em->block_start = existing->block_start;
2113 em->start = existing->start;
2114 em->len = new_end - existing->start;
2115 free_extent_map(existing);
2116
2117 ret = add_extent_mapping(em_tree, em);
2118 } else {
2119 goto invalid;
2120 }
2121 return ret;
2122
2123 invalid:
2124 printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n",
2125 existing->start, existing->len, existing->block_start,
2126 em->start, em->len, em->block_start);
2127 return -EIO;
2128 }
2129
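/*
 * return the extent map for [start, start + len) of an inode.  The
 * in-memory extent map tree is checked first; on a miss the file extent
 * item is read from the btree (copying inline data into the page when one
 * is supplied) and the new mapping is inserted into the tree, merging
 * with any overlapping entry that appeared in the meantime.
 */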
2130 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
2131 size_t pg_offset, u64 start, u64 len,
2132 int create)
2133 {
2134 int ret;
2135 int err = 0;
2136 u64 bytenr;
2137 u64 extent_start = 0;
2138 u64 extent_end = 0;
2139 u64 objectid = inode->i_ino;
2140 u32 found_type;
2141 struct btrfs_path *path;
2142 struct btrfs_root *root = BTRFS_I(inode)->root;
2143 struct btrfs_file_extent_item *item;
2144 struct extent_buffer *leaf;
2145 struct btrfs_key found_key;
2146 struct extent_map *em = NULL;
2147 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2148 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2149 struct btrfs_trans_handle *trans = NULL;
2150
2151 path = btrfs_alloc_path();
2152 BUG_ON(!path);
2153
2154 again:
2155 spin_lock(&em_tree->lock);
2156 em = lookup_extent_mapping(em_tree, start, len);
2157 if (em)
2158 em->bdev = root->fs_info->fs_devices->latest_bdev;
2159 spin_unlock(&em_tree->lock);
2160
2161 if (em) {
2162 if (em->start > start || em->start + em->len <= start)
2163 free_extent_map(em);
2164 else if (em->block_start == EXTENT_MAP_INLINE && page)
2165 free_extent_map(em);
2166 else
2167 goto out;
2168 }
2169 em = alloc_extent_map(GFP_NOFS);
2170 if (!em) {
2171 err = -ENOMEM;
2172 goto out;
2173 }
2174
2175 em->start = EXTENT_MAP_HOLE;
2176 em->len = (u64)-1;
2177 em->bdev = root->fs_info->fs_devices->latest_bdev;
2178 ret = btrfs_lookup_file_extent(trans, root, path,
2179 objectid, start, trans != NULL);
2180 if (ret < 0) {
2181 err = ret;
2182 goto out;
2183 }
2184
2185 if (ret != 0) {
2186 if (path->slots[0] == 0)
2187 goto not_found;
2188 path->slots[0]--;
2189 }
2190
2191 leaf = path->nodes[0];
2192 item = btrfs_item_ptr(leaf, path->slots[0],
2193 struct btrfs_file_extent_item);
2194 /* are we inside the extent that was found? */
2195 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2196 found_type = btrfs_key_type(&found_key);
2197 if (found_key.objectid != objectid ||
2198 found_type != BTRFS_EXTENT_DATA_KEY) {
2199 goto not_found;
2200 }
2201
2202 found_type = btrfs_file_extent_type(leaf, item);
2203 extent_start = found_key.offset;
2204 if (found_type == BTRFS_FILE_EXTENT_REG) {
2205 extent_end = extent_start +
2206 btrfs_file_extent_num_bytes(leaf, item);
2207 err = 0;
2208 if (start < extent_start || start >= extent_end) {
2209 em->start = start;
2210 if (start < extent_start) {
2211 if (start + len <= extent_start)
2212 goto not_found;
2213 em->len = extent_end - extent_start;
2214 } else {
2215 em->len = len;
2216 }
2217 goto not_found_em;
2218 }
2219 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
2220 if (bytenr == 0) {
2221 em->start = extent_start;
2222 em->len = extent_end - extent_start;
2223 em->block_start = EXTENT_MAP_HOLE;
2224 goto insert;
2225 }
2226 bytenr += btrfs_file_extent_offset(leaf, item);
2227 em->block_start = bytenr;
2228 em->start = extent_start;
2229 em->len = extent_end - extent_start;
2230 goto insert;
2231 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
2232 u64 page_start;
2233 unsigned long ptr;
2234 char *map;
2235 size_t size;
2236 size_t extent_offset;
2237 size_t copy_size;
2238
2239 size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
2240 path->slots[0]));
2241 extent_end = (extent_start + size + root->sectorsize - 1) &
2242 ~((u64)root->sectorsize - 1);
2243 if (start < extent_start || start >= extent_end) {
2244 em->start = start;
2245 if (start < extent_start) {
2246 if (start + len <= extent_start)
2247 goto not_found;
2248 em->len = extent_end - extent_start;
2249 } else {
2250 em->len = len;
2251 }
2252 goto not_found_em;
2253 }
2254 em->block_start = EXTENT_MAP_INLINE;
2255
2256 if (!page) {
2257 em->start = extent_start;
2258 em->len = size;
2259 goto out;
2260 }
2261
2262 page_start = page_offset(page) + pg_offset;
2263 extent_offset = page_start - extent_start;
2264 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
2265 size - extent_offset);
2266 em->start = extent_start + extent_offset;
2267 em->len = (copy_size + root->sectorsize - 1) &
2268 ~((u64)root->sectorsize - 1);
2269 map = kmap(page);
2270 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
2271 if (create == 0 && !PageUptodate(page)) {
2272 read_extent_buffer(leaf, map + pg_offset, ptr,
2273 copy_size);
2274 flush_dcache_page(page);
2275 } else if (create && PageUptodate(page)) {
2276 if (!trans) {
2277 kunmap(page);
2278 free_extent_map(em);
2279 em = NULL;
2280 btrfs_release_path(root, path);
2281 trans = btrfs_start_transaction(root, 1);
2282 goto again;
2283 }
2284 write_extent_buffer(leaf, map + pg_offset, ptr,
2285 copy_size);
2286 btrfs_mark_buffer_dirty(leaf);
2287 }
2288 kunmap(page);
2289 set_extent_uptodate(io_tree, em->start,
2290 extent_map_end(em) - 1, GFP_NOFS);
2291 goto insert;
2292 } else {
2293 printk("unknown found_type %d\n", found_type);
2294 WARN_ON(1);
2295 }
2296 not_found:
2297 em->start = start;
2298 em->len = len;
2299 not_found_em:
2300 em->block_start = EXTENT_MAP_HOLE;
2301 insert:
2302 btrfs_release_path(root, path);
2303 if (em->start > start || extent_map_end(em) <= start) {
2304 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
2305 err = -EIO;
2306 goto out;
2307 }
2308
2309 err = 0;
2310 spin_lock(&em_tree->lock);
2311 ret = add_extent_mapping(em_tree, em);
2312 /* it is possible that someone inserted the extent into the tree
2313 * while we had the lock dropped. It is also possible that
2314 * an overlapping map exists in the tree
2315 */
2316 if (ret == -EEXIST) {
2317 struct extent_map *existing;
2318 existing = lookup_extent_mapping(em_tree, start, len);
2319 if (existing && (existing->start > start ||
2320 existing->start + existing->len <= start)) {
2321 free_extent_map(existing);
2322 existing = NULL;
2323 }
2324 if (!existing) {
2325 existing = lookup_extent_mapping(em_tree, em->start,
2326 em->len);
2327 if (existing) {
2328 err = merge_extent_mapping(em_tree, existing,
2329 em);
2330 free_extent_map(existing);
2331 if (err) {
2332 free_extent_map(em);
2333 em = NULL;
2334 }
2335 } else {
2336 err = -EIO;
2337 printk("failing to insert %Lu %Lu\n",
2338 start, len);
2339 free_extent_map(em);
2340 em = NULL;
2341 }
2342 } else {
2343 free_extent_map(em);
2344 em = existing;
2345 }
2346 }
2347 spin_unlock(&em_tree->lock);
2348 out:
2349 btrfs_free_path(path);
2350 if (trans) {
2351 ret = btrfs_end_transaction(trans, root);
2352 if (!err)
2353 err = ret;
2354 }
2355 if (err) {
2356 free_extent_map(em);
2357 WARN_ON(1);
2358 return ERR_PTR(err);
2359 }
2360 return em;
2361 }
2362
2363 #if 0 /* waiting for O_DIRECT reads */
2364 static int btrfs_get_block(struct inode *inode, sector_t iblock,
2365 struct buffer_head *bh_result, int create)
2366 {
2367 struct extent_map *em;
2368 u64 start = (u64)iblock << inode->i_blkbits;
2369 struct btrfs_multi_bio *multi = NULL;
2370 struct btrfs_root *root = BTRFS_I(inode)->root;
2371 u64 len;
2372 u64 logical;
2373 u64 map_length;
2374 int ret = 0;
2375
2376 em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0);
2377
2378 if (!em || IS_ERR(em))
2379 goto out;
2380
2381 if (em->start > start || em->start + em->len <= start) {
2382 goto out;
2383 }
2384
2385 if (em->block_start == EXTENT_MAP_INLINE) {
2386 ret = -EINVAL;
2387 goto out;
2388 }
2389
2390 len = em->start + em->len - start;
2391 len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
2392
2393 if (em->block_start == EXTENT_MAP_HOLE ||
2394 em->block_start == EXTENT_MAP_DELALLOC) {
2395 bh_result->b_size = len;
2396 goto out;
2397 }
2398
2399 logical = start - em->start;
2400 logical = em->block_start + logical;
2401
2402 map_length = len;
2403 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
2404 logical, &map_length, &multi, 0);
2405 BUG_ON(ret);
2406 bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits;
2407 bh_result->b_size = min(map_length, len);
2408
2409 bh_result->b_bdev = multi->stripes[0].dev->bdev;
2410 set_buffer_mapped(bh_result);
2411 kfree(multi);
2412 out:
2413 free_extent_map(em);
2414 return ret;
2415 }
2416 #endif
2417
2418 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
2419 const struct iovec *iov, loff_t offset,
2420 unsigned long nr_segs)
2421 {
2422 return -EINVAL;
2423 #if 0
2424 struct file *file = iocb->ki_filp;
2425 struct inode *inode = file->f_mapping->host;
2426
2427 if (rw == WRITE)
2428 return -EINVAL;
2429
2430 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
2431 offset, nr_segs, btrfs_get_block, NULL);
2432 #endif
2433 }
2434
2435 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
2436 {
2437 return extent_bmap(mapping, iblock, btrfs_get_extent);
2438 }
2439
2440 int btrfs_readpage(struct file *file, struct page *page)
2441 {
2442 struct extent_io_tree *tree;
2443 tree = &BTRFS_I(page->mapping->host)->io_tree;
2444 return extent_read_full_page(tree, page, btrfs_get_extent);
2445 }
2446
2447 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
2448 {
2449 struct extent_io_tree *tree;
2450
2451
2452 if (current->flags & PF_MEMALLOC) {
2453 redirty_page_for_writepage(wbc, page);
2454 unlock_page(page);
2455 return 0;
2456 }
2457 tree = &BTRFS_I(page->mapping->host)->io_tree;
2458 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
2459 }
2460
2461 static int btrfs_writepages(struct address_space *mapping,
2462 struct writeback_control *wbc)
2463 {
2464 struct extent_io_tree *tree;
2465 tree = &BTRFS_I(mapping->host)->io_tree;
2466 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
2467 }
2468
2469 static int
2470 btrfs_readpages(struct file *file, struct address_space *mapping,
2471 struct list_head *pages, unsigned nr_pages)
2472 {
2473 struct extent_io_tree *tree;
2474 tree = &BTRFS_I(mapping->host)->io_tree;
2475 return extent_readpages(tree, mapping, pages, nr_pages,
2476 btrfs_get_extent);
2477 }
2478
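/*
 * releasepage: let the extent code try to drop the extent mapping and
 * state for this page; if that succeeds, clear the page's private bit
 * and drop the reference it held.
 */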
2479 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
2480 {
2481 struct extent_io_tree *tree;
2482 struct extent_map_tree *map;
2483 int ret;
2484
2485 tree = &BTRFS_I(page->mapping->host)->io_tree;
2486 map = &BTRFS_I(page->mapping->host)->extent_tree;
2487 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
2488 if (ret == 1) {
2489 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
2490 ClearPagePrivate(page);
2491 set_page_private(page, 0);
2492 page_cache_release(page);
2493 }
2494 return ret;
2495 }
2496
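/*
 * invalidatepage: clear any extent state covering the page, then drop
 * the page's private reference if releasepage could not.
 */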
2497 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
2498 {
2499 struct extent_io_tree *tree;
2500
2501 tree = &BTRFS_I(page->mapping->host)->io_tree;
2502 extent_invalidatepage(tree, page, offset);
2503 btrfs_releasepage(page, GFP_NOFS);
2504 if (PagePrivate(page)) {
2505 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
2506 ClearPagePrivate(page);
2507 set_page_private(page, 0);
2508 page_cache_release(page);
2509 }
2510 }
2511
2512 /*
2513 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
2514 * called from a page fault handler when a page is first dirtied. Hence we must
2515 * be careful to check for EOF conditions here. We set the page up correctly
2516 * for a written page which means we get ENOSPC checking when writing into
2517 * holes and correct delalloc and unwritten extent mapping on filesystems that
2518 * support these features.
2519 *
2520 * We are not allowed to take the i_mutex here so we have to play games to
2521 * protect against truncate races as the page could now be beyond EOF. Because
2522 * vmtruncate() writes the inode size before removing pages, once we have the
2523 * page lock we can determine safely if the page is beyond EOF. If it is not
2524 * beyond EOF, then the page is guaranteed safe against truncation until we
2525 * unlock the page.
2526 */
2527 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
2528 {
2529 struct inode *inode = fdentry(vma->vm_file)->d_inode;
2530 struct btrfs_root *root = BTRFS_I(inode)->root;
2531 unsigned long end;
2532 loff_t size;
2533 int ret;
2534 u64 page_start;
2535
2536 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
2537 if (ret)
2538 goto out;
2539
2540 ret = -EINVAL;
2541
2542 lock_page(page);
2543 wait_on_page_writeback(page);
2544 size = i_size_read(inode);
2545 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2546
2547 if ((page->mapping != inode->i_mapping) ||
2548 (page_start > size)) {
2549 /* page got truncated out from underneath us */
2550 goto out_unlock;
2551 }
2552
2553 /* page is wholly or partially inside EOF */
2554 if (page_start + PAGE_CACHE_SIZE > size)
2555 end = size & ~PAGE_CACHE_MASK;
2556 else
2557 end = PAGE_CACHE_SIZE;
2558
2559 ret = btrfs_cow_one_page(inode, page, end);
2560
2561 out_unlock:
2562 unlock_page(page);
2563 out:
2564 return ret;
2565 }
2566
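/*
 * truncate a regular file down to i_size: zero the tail of the last page
 * and drop the file extent items beyond the new size in one transaction.
 */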
2567 static void btrfs_truncate(struct inode *inode)
2568 {
2569 struct btrfs_root *root = BTRFS_I(inode)->root;
2570 int ret;
2571 struct btrfs_trans_handle *trans;
2572 unsigned long nr;
2573
2574 if (!S_ISREG(inode->i_mode))
2575 return;
2576 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2577 return;
2578
2579 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2580
2581 trans = btrfs_start_transaction(root, 1);
2582 btrfs_set_trans_block_group(trans, inode);
2583
2584 /* FIXME, add redo link to tree so we don't leak on crash */
2585 ret = btrfs_truncate_in_trans(trans, root, inode,
2586 BTRFS_EXTENT_DATA_KEY);
2587 btrfs_update_inode(trans, root, inode);
2588 nr = trans->blocks_used;
2589
2590 ret = btrfs_end_transaction(trans, root);
2591 BUG_ON(ret);
2592 btrfs_btree_balance_dirty(root, nr);
2593 btrfs_throttle(root);
2594 }
2595
2596 /*
2597 * Invalidate a single dcache entry at the root of the filesystem.
2598 * Needed after creation of snapshot or subvolume.
2599 */
2600 void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name,
2601 int namelen)
2602 {
2603 struct dentry *alias, *entry;
2604 struct qstr qstr;
2605
2606 alias = d_find_alias(root->fs_info->sb->s_root->d_inode);
2607 if (alias) {
2608 qstr.name = name;
2609 qstr.len = namelen;
2610 /* change me if btrfs ever gets a d_hash operation */
2611 qstr.hash = full_name_hash(qstr.name, qstr.len);
2612 entry = d_lookup(alias, &qstr);
2613 dput(alias);
2614 if (entry) {
2615 d_invalidate(entry);
2616 dput(entry);
2617 }
2618 }
2619 }
2620
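/*
 * create the empty ".." root directory inode for a brand new subvolume
 * and point its inode ref back at itself.
 */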
2621 int btrfs_create_subvol_root(struct btrfs_root *new_root,
2622 struct btrfs_trans_handle *trans, u64 new_dirid,
2623 struct btrfs_block_group_cache *block_group)
2624 {
2625 struct inode *inode;
2626 int ret;
2627
2628 inode = btrfs_new_inode(trans, new_root, "..", 2, new_dirid,
2629 new_dirid, block_group, S_IFDIR | 0700);
2630 if (IS_ERR(inode))
2631 return PTR_ERR(inode);
2632 inode->i_op = &btrfs_dir_inode_operations;
2633 inode->i_fop = &btrfs_dir_file_operations;
2634 new_root->inode = inode;
2635
2636 ret = btrfs_insert_inode_ref(trans, new_root, "..", 2, new_dirid,
2637 new_dirid);
2638 inode->i_nlink = 1;
2639 inode->i_size = 0;
2640
2641 return btrfs_update_inode(trans, new_root, inode);
2642 }
2643
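/*
 * start readahead for a pgoff range, hiding the readahead API change
 * that happened in 2.6.23.
 */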
2644 unsigned long btrfs_force_ra(struct address_space *mapping,
2645 struct file_ra_state *ra, struct file *file,
2646 pgoff_t offset, pgoff_t last_index)
2647 {
2648 pgoff_t req_size = last_index - offset + 1;
2649
2650 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2651 offset = page_cache_readahead(mapping, ra, file, offset, req_size);
2652 return offset;
2653 #else
2654 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
2655 return offset + req_size;
2656 #endif
2657 }
2658
2659 struct inode *btrfs_alloc_inode(struct super_block *sb)
2660 {
2661 struct btrfs_inode *ei;
2662
2663 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
2664 if (!ei)
2665 return NULL;
2666 ei->last_trans = 0;
2667 ei->ordered_trans = 0;
2668 return &ei->vfs_inode;
2669 }
2670
2671 void btrfs_destroy_inode(struct inode *inode)
2672 {
2673 WARN_ON(!list_empty(&inode->i_dentry));
2674 WARN_ON(inode->i_data.nrpages);
2675
2676 btrfs_drop_extent_cache(inode, 0, (u64)-1);
2677 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
2678 }
2679
2680 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
2681 static void init_once(struct kmem_cache * cachep, void *foo)
2682 #else
2683 static void init_once(void * foo, struct kmem_cache * cachep,
2684 unsigned long flags)
2685 #endif
2686 {
2687 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
2688
2689 inode_init_once(&ei->vfs_inode);
2690 }
2691
2692 void btrfs_destroy_cachep(void)
2693 {
2694 if (btrfs_inode_cachep)
2695 kmem_cache_destroy(btrfs_inode_cachep);
2696 if (btrfs_trans_handle_cachep)
2697 kmem_cache_destroy(btrfs_trans_handle_cachep);
2698 if (btrfs_transaction_cachep)
2699 kmem_cache_destroy(btrfs_transaction_cachep);
2700 if (btrfs_bit_radix_cachep)
2701 kmem_cache_destroy(btrfs_bit_radix_cachep);
2702 if (btrfs_path_cachep)
2703 kmem_cache_destroy(btrfs_path_cachep);
2704 }
2705
2706 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
2707 unsigned long extra_flags,
2708 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
2709 void (*ctor)(struct kmem_cache *, void *)
2710 #else
2711 void (*ctor)(void *, struct kmem_cache *,
2712 unsigned long)
2713 #endif
2714 )
2715 {
2716 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
2717 SLAB_MEM_SPREAD | extra_flags), ctor
2718 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2719 ,NULL
2720 #endif
2721 );
2722 }
2723
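/*
 * create the slab caches btrfs uses for inodes, transaction handles,
 * transactions, paths and the bit radix; tear everything down again if
 * any one of them fails.
 */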
2724 int btrfs_init_cachep(void)
2725 {
2726 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
2727 sizeof(struct btrfs_inode),
2728 0, init_once);
2729 if (!btrfs_inode_cachep)
2730 goto fail;
2731 btrfs_trans_handle_cachep =
2732 btrfs_cache_create("btrfs_trans_handle_cache",
2733 sizeof(struct btrfs_trans_handle),
2734 0, NULL);
2735 if (!btrfs_trans_handle_cachep)
2736 goto fail;
2737 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
2738 sizeof(struct btrfs_transaction),
2739 0, NULL);
2740 if (!btrfs_transaction_cachep)
2741 goto fail;
2742 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
2743 sizeof(struct btrfs_path),
2744 0, NULL);
2745 if (!btrfs_path_cachep)
2746 goto fail;
2747 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
2748 SLAB_DESTROY_BY_RCU, NULL);
2749 if (!btrfs_bit_radix_cachep)
2750 goto fail;
2751 return 0;
2752 fail:
2753 btrfs_destroy_cachep();
2754 return -ENOMEM;
2755 }
2756
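/*
 * getattr: include bytes still queued for delayed allocation in the
 * reported block count.
 */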
2757 static int btrfs_getattr(struct vfsmount *mnt,
2758 struct dentry *dentry, struct kstat *stat)
2759 {
2760 struct inode *inode = dentry->d_inode;
2761 generic_fillattr(inode, stat);
2762 stat->blksize = PAGE_CACHE_SIZE;
2763 stat->blocks = inode->i_blocks + (BTRFS_I(inode)->delalloc_bytes >> 9);
2764 return 0;
2765 }
2766
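/*
 * rename: unlink the old entry (and any existing target) and add the new
 * link inside one transaction, updating the ctime/mtime of both
 * directories.
 */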
2767 static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
2768 struct inode * new_dir,struct dentry *new_dentry)
2769 {
2770 struct btrfs_trans_handle *trans;
2771 struct btrfs_root *root = BTRFS_I(old_dir)->root;
2772 struct inode *new_inode = new_dentry->d_inode;
2773 struct inode *old_inode = old_dentry->d_inode;
2774 struct timespec ctime = CURRENT_TIME;
2775 int ret;
2776
2777 if (S_ISDIR(old_inode->i_mode) && new_inode &&
2778 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
2779 return -ENOTEMPTY;
2780 }
2781
2782 ret = btrfs_check_free_space(root, 1, 0);
2783 if (ret)
2784 goto out_unlock;
2785
2786 trans = btrfs_start_transaction(root, 1);
2787
2788 btrfs_set_trans_block_group(trans, new_dir);
2789
2790 old_dentry->d_inode->i_nlink++;
2791 old_dir->i_ctime = old_dir->i_mtime = ctime;
2792 new_dir->i_ctime = new_dir->i_mtime = ctime;
2793 old_inode->i_ctime = ctime;
2794
2795 ret = btrfs_unlink_trans(trans, root, old_dir, old_dentry);
2796 if (ret)
2797 goto out_fail;
2798
2799 if (new_inode) {
2800 new_inode->i_ctime = CURRENT_TIME;
2801 ret = btrfs_unlink_trans(trans, root, new_dir, new_dentry);
2802 if (ret)
2803 goto out_fail;
2804 }
2805 ret = btrfs_add_link(trans, new_dentry, old_inode, 1);
2806 if (ret)
2807 goto out_fail;
2808
2809 out_fail:
2810 btrfs_end_transaction(trans, root);
2811 out_unlock:
2812 return ret;
2813 }
2814
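/*
 * symlink: the target string is stored as an inline file extent item
 * right in the btree, so its length is limited to what fits in a leaf.
 */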
2815 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
2816 const char *symname)
2817 {
2818 struct btrfs_trans_handle *trans;
2819 struct btrfs_root *root = BTRFS_I(dir)->root;
2820 struct btrfs_path *path;
2821 struct btrfs_key key;
2822 struct inode *inode = NULL;
2823 int err;
2824 int drop_inode = 0;
2825 u64 objectid;
2826 int name_len;
2827 int datasize;
2828 unsigned long ptr;
2829 struct btrfs_file_extent_item *ei;
2830 struct extent_buffer *leaf;
2831 unsigned long nr = 0;
2832
2833 name_len = strlen(symname) + 1;
2834 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
2835 return -ENAMETOOLONG;
2836
2837 err = btrfs_check_free_space(root, 1, 0);
2838 if (err)
2839 goto out_fail;
2840
2841 trans = btrfs_start_transaction(root, 1);
2842 btrfs_set_trans_block_group(trans, dir);
2843
2844 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
2845 if (err) {
2846 err = -ENOSPC;
2847 goto out_unlock;
2848 }
2849
2850 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
2851 dentry->d_name.len,
2852 dentry->d_parent->d_inode->i_ino, objectid,
2853 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO);
2854 err = PTR_ERR(inode);
2855 if (IS_ERR(inode))
2856 goto out_unlock;
2857
2858 btrfs_set_trans_block_group(trans, inode);
2859 err = btrfs_add_nondir(trans, dentry, inode, 0);
2860 if (err)
2861 drop_inode = 1;
2862 else {
2863 inode->i_mapping->a_ops = &btrfs_aops;
2864 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2865 inode->i_fop = &btrfs_file_operations;
2866 inode->i_op = &btrfs_file_inode_operations;
2867 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2868 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2869 inode->i_mapping, GFP_NOFS);
2870 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
2871 inode->i_mapping, GFP_NOFS);
2872 BTRFS_I(inode)->delalloc_bytes = 0;
2873 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
2874 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2875 }
2876 dir->i_sb->s_dirt = 1;
2877 btrfs_update_inode_block_group(trans, inode);
2878 btrfs_update_inode_block_group(trans, dir);
2879 if (drop_inode)
2880 goto out_unlock;
2881
2882 path = btrfs_alloc_path();
2883 BUG_ON(!path);
2884 key.objectid = inode->i_ino;
2885 key.offset = 0;
2886 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
2887 datasize = btrfs_file_extent_calc_inline_size(name_len);
2888 err = btrfs_insert_empty_item(trans, root, path, &key,
2889 datasize);
2890 if (err) {
2891 drop_inode = 1;
2892 goto out_unlock;
2893 }
2894 leaf = path->nodes[0];
2895 ei = btrfs_item_ptr(leaf, path->slots[0],
2896 struct btrfs_file_extent_item);
2897 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
2898 btrfs_set_file_extent_type(leaf, ei,
2899 BTRFS_FILE_EXTENT_INLINE);
2900 ptr = btrfs_file_extent_inline_start(ei);
2901 write_extent_buffer(leaf, symname, ptr, name_len);
2902 btrfs_mark_buffer_dirty(leaf);
2903 btrfs_free_path(path);
2904
2905 inode->i_op = &btrfs_symlink_inode_operations;
2906 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2907 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2908 inode->i_size = name_len - 1;
2909 err = btrfs_update_inode(trans, root, inode);
2910 if (err)
2911 drop_inode = 1;
2912
2913 out_unlock:
2914 nr = trans->blocks_used;
2915 btrfs_end_transaction(trans, root);
2916 out_fail:
2917 if (drop_inode) {
2918 inode_dec_link_count(inode);
2919 iput(inode);
2920 }
2921 btrfs_btree_balance_dirty(root, nr);
2922 btrfs_throttle(root);
2923 return err;
2924 }
2925
2926 static int btrfs_permission(struct inode *inode, int mask,
2927 struct nameidata *nd)
2928 {
2929 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
2930 return -EACCES;
2931 return generic_permission(inode, mask, NULL);
2932 }
2933
2934 static struct inode_operations btrfs_dir_inode_operations = {
2935 .lookup = btrfs_lookup,
2936 .create = btrfs_create,
2937 .unlink = btrfs_unlink,
2938 .link = btrfs_link,
2939 .mkdir = btrfs_mkdir,
2940 .rmdir = btrfs_rmdir,
2941 .rename = btrfs_rename,
2942 .symlink = btrfs_symlink,
2943 .setattr = btrfs_setattr,
2944 .mknod = btrfs_mknod,
2945 .setxattr = generic_setxattr,
2946 .getxattr = generic_getxattr,
2947 .listxattr = btrfs_listxattr,
2948 .removexattr = generic_removexattr,
2949 .permission = btrfs_permission,
2950 };
2951 static struct inode_operations btrfs_dir_ro_inode_operations = {
2952 .lookup = btrfs_lookup,
2953 .permission = btrfs_permission,
2954 };
2955 static struct file_operations btrfs_dir_file_operations = {
2956 .llseek = generic_file_llseek,
2957 .read = generic_read_dir,
2958 .readdir = btrfs_readdir,
2959 .unlocked_ioctl = btrfs_ioctl,
2960 #ifdef CONFIG_COMPAT
2961 .compat_ioctl = btrfs_ioctl,
2962 #endif
2963 .release = btrfs_release_file,
2964 };
2965
2966 static struct extent_io_ops btrfs_extent_io_ops = {
2967 .fill_delalloc = run_delalloc_range,
2968 .submit_bio_hook = btrfs_submit_bio_hook,
2969 .merge_bio_hook = btrfs_merge_bio_hook,
2970 .readpage_io_hook = btrfs_readpage_io_hook,
2971 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
2972 .readpage_io_failed_hook = btrfs_io_failed_hook,
2973 .set_bit_hook = btrfs_set_bit_hook,
2974 .clear_bit_hook = btrfs_clear_bit_hook,
2975 };
2976
2977 static struct address_space_operations btrfs_aops = {
2978 .readpage = btrfs_readpage,
2979 .writepage = btrfs_writepage,
2980 .writepages = btrfs_writepages,
2981 .readpages = btrfs_readpages,
2982 .sync_page = block_sync_page,
2983 .bmap = btrfs_bmap,
2984 .direct_IO = btrfs_direct_IO,
2985 .invalidatepage = btrfs_invalidatepage,
2986 .releasepage = btrfs_releasepage,
2987 .set_page_dirty = __set_page_dirty_nobuffers,
2988 };
2989
2990 static struct address_space_operations btrfs_symlink_aops = {
2991 .readpage = btrfs_readpage,
2992 .writepage = btrfs_writepage,
2993 .invalidatepage = btrfs_invalidatepage,
2994 .releasepage = btrfs_releasepage,
2995 };
2996
2997 static struct inode_operations btrfs_file_inode_operations = {
2998 .truncate = btrfs_truncate,
2999 .getattr = btrfs_getattr,
3000 .setattr = btrfs_setattr,
3001 .setxattr = generic_setxattr,
3002 .getxattr = generic_getxattr,
3003 .listxattr = btrfs_listxattr,
3004 .removexattr = generic_removexattr,
3005 .permission = btrfs_permission,
3006 };
3007 static struct inode_operations btrfs_special_inode_operations = {
3008 .getattr = btrfs_getattr,
3009 .setattr = btrfs_setattr,
3010 .permission = btrfs_permission,
3011 };
3012 static struct inode_operations btrfs_symlink_inode_operations = {
3013 .readlink = generic_readlink,
3014 .follow_link = page_follow_link_light,
3015 .put_link = page_put_link,
3016 .permission = btrfs_permission,
3017 };