Btrfs: Remove debugging statements from the invalidatepage calls
fs/btrfs/inode.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/bio.h>
20 #include <linux/buffer_head.h>
21 #include <linux/fs.h>
22 #include <linux/pagemap.h>
23 #include <linux/highmem.h>
24 #include <linux/time.h>
25 #include <linux/init.h>
26 #include <linux/string.h>
27 #include <linux/smp_lock.h>
28 #include <linux/backing-dev.h>
29 #include <linux/mpage.h>
30 #include <linux/swap.h>
31 #include <linux/writeback.h>
32 #include <linux/statfs.h>
33 #include <linux/compat.h>
34 #include <linux/bit_spinlock.h>
35 #include <linux/version.h>
36 #include <linux/xattr.h>
37 #include "ctree.h"
38 #include "disk-io.h"
39 #include "transaction.h"
40 #include "btrfs_inode.h"
41 #include "ioctl.h"
42 #include "print-tree.h"
43 #include "volumes.h"
44
45 struct btrfs_iget_args {
46 u64 ino;
47 struct btrfs_root *root;
48 };
49
50 static struct inode_operations btrfs_dir_inode_operations;
51 static struct inode_operations btrfs_symlink_inode_operations;
52 static struct inode_operations btrfs_dir_ro_inode_operations;
53 static struct inode_operations btrfs_special_inode_operations;
54 static struct inode_operations btrfs_file_inode_operations;
55 static struct address_space_operations btrfs_aops;
56 static struct address_space_operations btrfs_symlink_aops;
57 static struct file_operations btrfs_dir_file_operations;
58 static struct extent_io_ops btrfs_extent_io_ops;
59
60 static struct kmem_cache *btrfs_inode_cachep;
61 struct kmem_cache *btrfs_trans_handle_cachep;
62 struct kmem_cache *btrfs_transaction_cachep;
63 struct kmem_cache *btrfs_bit_radix_cachep;
64 struct kmem_cache *btrfs_path_cachep;
65
66 #define S_SHIFT 12
67 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
68 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
69 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
70 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
71 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
72 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
73 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
74 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
75 };
76
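/*
 * rough enospc check: fail with -ENOSPC when used bytes plus
 * outstanding delalloc plus the requested amount would push the FS
 * past 85% full (90% for deletes, so unlinks can still make progress
 * on a nearly full FS)
 */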
77 int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
78 int for_del)
79 {
80 u64 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
81 u64 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
82 u64 thresh;
83 int ret = 0;
84
85 if (for_del)
86 thresh = total * 90;
87 else
88 thresh = total * 85;
89
90 do_div(thresh, 100);
91
92 spin_lock(&root->fs_info->delalloc_lock);
93 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
94 ret = -ENOSPC;
95 spin_unlock(&root->fs_info->delalloc_lock);
96 return ret;
97 }
98
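/*
 * allocate regular (cow'd) extents for the delalloc range
 * [start, end]: round the range up to the sectorsize, drop any
 * existing extents, then allocate and insert file extents in chunks
 * of at most max_extent bytes
 */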
99 static int cow_file_range(struct inode *inode, u64 start, u64 end)
100 {
101 struct btrfs_root *root = BTRFS_I(inode)->root;
102 struct btrfs_trans_handle *trans;
103 u64 alloc_hint = 0;
104 u64 num_bytes;
105 u64 cur_alloc_size;
106 u64 blocksize = root->sectorsize;
107 u64 orig_start = start;
108 u64 orig_num_bytes;
109 struct btrfs_key ins;
110 int ret;
111
112 trans = btrfs_start_transaction(root, 1);
113 BUG_ON(!trans);
114 btrfs_set_trans_block_group(trans, inode);
115
116 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
117 num_bytes = max(blocksize, num_bytes);
118 ret = btrfs_drop_extents(trans, root, inode,
119 start, start + num_bytes, start, &alloc_hint);
120 orig_num_bytes = num_bytes;
121
122 if (alloc_hint == EXTENT_MAP_INLINE)
123 goto out;
124
125 BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy));
126
127 while(num_bytes > 0) {
128 cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
129 ret = btrfs_alloc_extent(trans, root, cur_alloc_size,
130 root->sectorsize,
131 root->root_key.objectid,
132 trans->transid,
133 inode->i_ino, start, 0,
134 alloc_hint, (u64)-1, &ins, 1);
135 if (ret) {
136 WARN_ON(1);
137 goto out;
138 }
139 cur_alloc_size = ins.offset;
140 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
141 start, ins.objectid, ins.offset,
142 ins.offset);
143 inode->i_blocks += ins.offset >> 9;
144 btrfs_check_file(root, inode);
145 if (num_bytes < cur_alloc_size) {
146 printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes,
147 cur_alloc_size);
148 break;
149 }
150 num_bytes -= cur_alloc_size;
151 alloc_hint = ins.objectid + ins.offset;
152 start += cur_alloc_size;
153 }
154 btrfs_drop_extent_cache(inode, orig_start,
155 orig_start + orig_num_bytes - 1);
156 btrfs_add_ordered_inode(inode);
157 btrfs_update_inode(trans, root, inode);
158 out:
159 btrfs_end_transaction(trans, root);
160 return ret;
161 }
162
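/*
 * nodatacow path: walk the file extents that already cover the
 * delalloc range and leave them in place when that is safe (a real,
 * non-hole extent inside the FS limits that isn't shared with a
 * snapshot).  anything that can't be reused falls back to
 * cow_file_range()
 */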
163 static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end)
164 {
165 u64 extent_start;
166 u64 extent_end;
167 u64 bytenr;
168 u64 cow_end;
169 u64 loops = 0;
170 u64 total_fs_bytes;
171 struct btrfs_root *root = BTRFS_I(inode)->root;
172 struct extent_buffer *leaf;
173 int found_type;
174 struct btrfs_path *path;
175 struct btrfs_file_extent_item *item;
176 int ret;
177 int err;
178 struct btrfs_key found_key;
179
180 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
181 path = btrfs_alloc_path();
182 BUG_ON(!path);
183 again:
184 ret = btrfs_lookup_file_extent(NULL, root, path,
185 inode->i_ino, start, 0);
186 if (ret < 0) {
187 btrfs_free_path(path);
188 return ret;
189 }
190
191 cow_end = end;
192 if (ret != 0) {
193 if (path->slots[0] == 0)
194 goto not_found;
195 path->slots[0]--;
196 }
197
198 leaf = path->nodes[0];
199 item = btrfs_item_ptr(leaf, path->slots[0],
200 struct btrfs_file_extent_item);
201
202 /* are we inside the extent that was found? */
203 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
204 found_type = btrfs_key_type(&found_key);
205 if (found_key.objectid != inode->i_ino ||
206 found_type != BTRFS_EXTENT_DATA_KEY) {
207 goto not_found;
208 }
209
210 found_type = btrfs_file_extent_type(leaf, item);
211 extent_start = found_key.offset;
212 if (found_type == BTRFS_FILE_EXTENT_REG) {
213 u64 extent_num_bytes;
214
215 extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
216 extent_end = extent_start + extent_num_bytes;
217 err = 0;
218
219 if (loops && start != extent_start)
220 goto not_found;
221
222 if (start < extent_start || start >= extent_end)
223 goto not_found;
224
225 cow_end = min(end, extent_end - 1);
226 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
227 if (bytenr == 0)
228 goto not_found;
229
230 /*
231 * we may be called by the resizer, make sure we're inside
232 * the limits of the FS
233 */
234 if (bytenr + extent_num_bytes > total_fs_bytes)
235 goto not_found;
236
237 if (btrfs_count_snapshots_in_path(root, path, bytenr) != 1) {
238 goto not_found;
239 }
240
241 start = extent_end;
242 } else {
243 goto not_found;
244 }
245 loop:
246 if (start > end) {
247 btrfs_free_path(path);
248 return 0;
249 }
250 btrfs_release_path(root, path);
251 loops++;
252 goto again;
253
254 not_found:
255 cow_file_range(inode, start, cow_end);
256 start = cow_end + 1;
257 goto loop;
258 }
259
260 static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
261 {
262 struct btrfs_root *root = BTRFS_I(inode)->root;
263 int ret;
264 mutex_lock(&root->fs_info->fs_mutex);
265 if (btrfs_test_opt(root, NODATACOW) ||
266 btrfs_test_flag(inode, NODATACOW))
267 ret = run_delalloc_nocow(inode, start, end);
268 else
269 ret = cow_file_range(inode, start, end);
270
271 mutex_unlock(&root->fs_info->fs_mutex);
272 return ret;
273 }
274
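/*
 * extent_io tree hooks that keep the per-inode and per-fs delalloc
 * byte counters up to date as EXTENT_DELALLOC bits are set and
 * cleared
 */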
275 int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
276 unsigned long old, unsigned long bits)
277 {
278 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
279 struct btrfs_root *root = BTRFS_I(inode)->root;
280 spin_lock(&root->fs_info->delalloc_lock);
281 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
282 root->fs_info->delalloc_bytes += end - start + 1;
283 spin_unlock(&root->fs_info->delalloc_lock);
284 }
285 return 0;
286 }
287
288 int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
289 unsigned long old, unsigned long bits)
290 {
291 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
292 struct btrfs_root *root = BTRFS_I(inode)->root;
293 spin_lock(&root->fs_info->delalloc_lock);
294 if (end - start + 1 > root->fs_info->delalloc_bytes) {
295 printk("warning: delalloc account %Lu %Lu\n",
296 end - start + 1, root->fs_info->delalloc_bytes);
297 root->fs_info->delalloc_bytes = 0;
298 BTRFS_I(inode)->delalloc_bytes = 0;
299 } else {
300 root->fs_info->delalloc_bytes -= end - start + 1;
301 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
302 }
303 spin_unlock(&root->fs_info->delalloc_lock);
304 }
305 return 0;
306 }
307
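/*
 * called before a page is added to a bio: return 1 when
 * btrfs_map_block() reports a mapping too short to cover the bio
 * plus the new page, telling the caller to submit the bio first
 * instead of merging across the boundary
 */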
308 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
309 size_t size, struct bio *bio)
310 {
311 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
312 struct btrfs_mapping_tree *map_tree;
313 u64 logical = bio->bi_sector << 9;
314 u64 length = 0;
315 u64 map_length;
316 struct bio_vec *bvec;
317 int i;
318 int ret;
319
320 bio_for_each_segment(bvec, bio, i) {
321 length += bvec->bv_len;
322 }
323 map_tree = &root->fs_info->mapping_tree;
324 map_length = length;
325 ret = btrfs_map_block(map_tree, READ, logical,
326 &map_length, NULL, 0);
327
328 if (map_length < length + size) {
329 return 1;
330 }
331 return 0;
332 }
333
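/*
 * checksum the data in a write bio and record the sums in the csum
 * tree inside a transaction, then map and submit the bio via
 * btrfs_map_bio()
 */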
334 int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
335 int mirror_num)
336 {
337 struct btrfs_root *root = BTRFS_I(inode)->root;
338 struct btrfs_trans_handle *trans;
339 int ret = 0;
340 char *sums = NULL;
341
342 ret = btrfs_csum_one_bio(root, bio, &sums);
343 BUG_ON(ret);
344
345 mutex_lock(&root->fs_info->fs_mutex);
346 trans = btrfs_start_transaction(root, 1);
347
348 btrfs_set_trans_block_group(trans, inode);
349 btrfs_csum_file_blocks(trans, root, inode, bio, sums);
350
351 ret = btrfs_end_transaction(trans, root);
352 BUG_ON(ret);
353 mutex_unlock(&root->fs_info->fs_mutex);
354
355 kfree(sums);
356
357 return btrfs_map_bio(root, rw, bio, mirror_num);
358 }
359
360 int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
361 int mirror_num)
362 {
363 struct btrfs_root *root = BTRFS_I(inode)->root;
364 int ret = 0;
365
366 if (!(rw & (1 << BIO_RW))) {
367 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
368 BUG_ON(ret);
369 goto mapit;
370 }
371
372 if (btrfs_test_opt(root, NODATASUM) ||
373 btrfs_test_flag(inode, NODATASUM)) {
374 goto mapit;
375 }
376
377 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
378 inode, rw, bio, mirror_num,
379 __btrfs_submit_bio_hook);
380 mapit:
381 return btrfs_map_bio(root, rw, bio, mirror_num);
382 }
383
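/*
 * before a read is issued, look up the on-disk checksum for this
 * range and stash it in the io_tree private field so the end_io hook
 * can verify the data.  a missing csum (hole or preallocated region)
 * is not treated as an error
 */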
384 int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
385 {
386 int ret = 0;
387 struct inode *inode = page->mapping->host;
388 struct btrfs_root *root = BTRFS_I(inode)->root;
389 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
390 struct btrfs_csum_item *item;
391 struct btrfs_path *path = NULL;
392 u32 csum;
393
394 if (btrfs_test_opt(root, NODATASUM) ||
395 btrfs_test_flag(inode, NODATASUM))
396 return 0;
397
398 mutex_lock(&root->fs_info->fs_mutex);
399 path = btrfs_alloc_path();
400 item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
401 if (IS_ERR(item)) {
402 ret = PTR_ERR(item);
403 /* a csum that isn't present is a preallocated region. */
404 if (ret == -ENOENT || ret == -EFBIG)
405 ret = 0;
406 csum = 0;
407 printk("no csum found for inode %lu start %Lu\n", inode->i_ino, start);
408 goto out;
409 }
410 read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
411 BTRFS_CRC32_SIZE);
412 set_state_private(io_tree, start, csum);
413 out:
414 if (path)
415 btrfs_free_path(path);
416 mutex_unlock(&root->fs_info->fs_mutex);
417 return ret;
418 }
419
420 struct io_failure_record {
421 struct page *page;
422 u64 start;
423 u64 len;
424 u64 logical;
425 int last_mirror;
426 };
427
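/*
 * a read failed: record the range and logical block in the
 * io_failure tree and resubmit the bio against the next mirror,
 * giving up with -EIO once every copy has been tried
 */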
428 int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
429 struct page *page, u64 start, u64 end,
430 struct extent_state *state)
431 {
432 struct io_failure_record *failrec = NULL;
433 u64 private;
434 struct extent_map *em;
435 struct inode *inode = page->mapping->host;
436 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
437 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
438 struct bio *bio;
439 int num_copies;
440 int ret;
441 u64 logical;
442
443 ret = get_state_private(failure_tree, start, &private);
444 if (ret) {
445 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
446 if (!failrec)
447 return -ENOMEM;
448 failrec->start = start;
449 failrec->len = end - start + 1;
450 failrec->last_mirror = 0;
451
452 spin_lock(&em_tree->lock);
453 em = lookup_extent_mapping(em_tree, start, failrec->len);
454 if (em->start > start || em->start + em->len < start) {
455 free_extent_map(em);
456 em = NULL;
457 }
458 spin_unlock(&em_tree->lock);
459
460 if (!em || IS_ERR(em)) {
461 kfree(failrec);
462 return -EIO;
463 }
464 logical = start - em->start;
465 logical = em->block_start + logical;
466 failrec->logical = logical;
467 free_extent_map(em);
468 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
469 EXTENT_DIRTY, GFP_NOFS);
470 set_state_private(failure_tree, start,
471 (u64)(unsigned long)failrec);
472 } else {
473 failrec = (struct io_failure_record *)(unsigned long)private;
474 }
475 num_copies = btrfs_num_copies(
476 &BTRFS_I(inode)->root->fs_info->mapping_tree,
477 failrec->logical, failrec->len);
478 failrec->last_mirror++;
479 if (!state) {
480 spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
481 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
482 failrec->start,
483 EXTENT_LOCKED);
484 if (state && state->start != failrec->start)
485 state = NULL;
486 spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
487 }
488 if (!state || failrec->last_mirror > num_copies) {
489 set_state_private(failure_tree, failrec->start, 0);
490 clear_extent_bits(failure_tree, failrec->start,
491 failrec->start + failrec->len - 1,
492 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
493 kfree(failrec);
494 return -EIO;
495 }
496 bio = bio_alloc(GFP_NOFS, 1);
497 bio->bi_private = state;
498 bio->bi_end_io = failed_bio->bi_end_io;
499 bio->bi_sector = failrec->logical >> 9;
500 bio->bi_bdev = failed_bio->bi_bdev;
501 bio_add_page(bio, page, failrec->len, start - page_offset(page));
502 btrfs_submit_bio_hook(inode, READ, bio, failrec->last_mirror);
503 return 0;
504 }
505
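/*
 * read completion: recompute the checksum of the page contents and
 * compare it with the value saved by the io hook.  on mismatch the
 * range is memset to 0x1 and -EIO is returned unless no csum was
 * recorded for it; on success any matching io_failure record from an
 * earlier retry is cleared
 */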
506 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
507 struct extent_state *state)
508 {
509 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
510 struct inode *inode = page->mapping->host;
511 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
512 char *kaddr;
513 u64 private = ~(u32)0;
514 int ret;
515 struct btrfs_root *root = BTRFS_I(inode)->root;
516 u32 csum = ~(u32)0;
517 unsigned long flags;
518
519 if (btrfs_test_opt(root, NODATASUM) ||
520 btrfs_test_flag(inode, NODATASUM))
521 return 0;
522 if (state && state->start == start) {
523 private = state->private;
524 ret = 0;
525 } else {
526 ret = get_state_private(io_tree, start, &private);
527 }
528 local_irq_save(flags);
529 kaddr = kmap_atomic(page, KM_IRQ0);
530 if (ret) {
531 goto zeroit;
532 }
533 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
534 btrfs_csum_final(csum, (char *)&csum);
535 if (csum != private) {
536 goto zeroit;
537 }
538 kunmap_atomic(kaddr, KM_IRQ0);
539 local_irq_restore(flags);
540
541 /* if the io failure tree for this inode is non-empty,
542 * check to see if we've recovered from a failed IO
543 */
544 private = 0;
545 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
546 (u64)-1, 1, EXTENT_DIRTY)) {
547 u64 private_failure;
548 struct io_failure_record *failure;
549 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
550 start, &private_failure);
551 if (ret == 0) {
552 failure = (struct io_failure_record *)(unsigned long)
553 private_failure;
554 set_state_private(&BTRFS_I(inode)->io_failure_tree,
555 failure->start, 0);
556 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
557 failure->start,
558 failure->start + failure->len - 1,
559 EXTENT_DIRTY | EXTENT_LOCKED,
560 GFP_NOFS);
561 kfree(failure);
562 }
563 }
564 return 0;
565
566 zeroit:
567 printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
568 page->mapping->host->i_ino, (unsigned long long)start, csum,
569 private);
570 memset(kaddr + offset, 1, end - start + 1);
571 flush_dcache_page(page);
572 kunmap_atomic(kaddr, KM_IRQ0);
573 local_irq_restore(flags);
574 if (private == 0)
575 return 0;
576 return -EIO;
577 }
578
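/*
 * read an inode item from the btree and fill in the vfs inode:
 * ownership, size, timestamps, block group hint and the inode, file
 * and address_space operations that match its type
 */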
579 void btrfs_read_locked_inode(struct inode *inode)
580 {
581 struct btrfs_path *path;
582 struct extent_buffer *leaf;
583 struct btrfs_inode_item *inode_item;
584 struct btrfs_timespec *tspec;
585 struct btrfs_root *root = BTRFS_I(inode)->root;
586 struct btrfs_key location;
587 u64 alloc_group_block;
588 u32 rdev;
589 int ret;
590
591 path = btrfs_alloc_path();
592 BUG_ON(!path);
593 mutex_lock(&root->fs_info->fs_mutex);
594 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
595
596 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
597 if (ret)
598 goto make_bad;
599
600 leaf = path->nodes[0];
601 inode_item = btrfs_item_ptr(leaf, path->slots[0],
602 struct btrfs_inode_item);
603
604 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
605 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
606 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
607 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
608 inode->i_size = btrfs_inode_size(leaf, inode_item);
609
610 tspec = btrfs_inode_atime(inode_item);
611 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
612 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
613
614 tspec = btrfs_inode_mtime(inode_item);
615 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
616 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
617
618 tspec = btrfs_inode_ctime(inode_item);
619 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
620 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
621
622 inode->i_blocks = btrfs_inode_nblocks(leaf, inode_item);
623 inode->i_generation = btrfs_inode_generation(leaf, inode_item);
624 inode->i_rdev = 0;
625 rdev = btrfs_inode_rdev(leaf, inode_item);
626
627 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
628 BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
629 alloc_group_block);
630 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
631 if (!BTRFS_I(inode)->block_group) {
632 BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
633 NULL, 0,
634 BTRFS_BLOCK_GROUP_METADATA, 0);
635 }
636 btrfs_free_path(path);
637 inode_item = NULL;
638
639 mutex_unlock(&root->fs_info->fs_mutex);
640
641 switch (inode->i_mode & S_IFMT) {
642 case S_IFREG:
643 inode->i_mapping->a_ops = &btrfs_aops;
644 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
645 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
646 inode->i_fop = &btrfs_file_operations;
647 inode->i_op = &btrfs_file_inode_operations;
648 break;
649 case S_IFDIR:
650 inode->i_fop = &btrfs_dir_file_operations;
651 if (root == root->fs_info->tree_root)
652 inode->i_op = &btrfs_dir_ro_inode_operations;
653 else
654 inode->i_op = &btrfs_dir_inode_operations;
655 break;
656 case S_IFLNK:
657 inode->i_op = &btrfs_symlink_inode_operations;
658 inode->i_mapping->a_ops = &btrfs_symlink_aops;
659 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
660 break;
661 default:
662 init_special_inode(inode, inode->i_mode, rdev);
663 break;
664 }
665 return;
666
667 make_bad:
668 btrfs_release_path(root, path);
669 btrfs_free_path(path);
670 mutex_unlock(&root->fs_info->fs_mutex);
671 make_bad_inode(inode);
672 }
673
674 static void fill_inode_item(struct extent_buffer *leaf,
675 struct btrfs_inode_item *item,
676 struct inode *inode)
677 {
678 btrfs_set_inode_uid(leaf, item, inode->i_uid);
679 btrfs_set_inode_gid(leaf, item, inode->i_gid);
680 btrfs_set_inode_size(leaf, item, inode->i_size);
681 btrfs_set_inode_mode(leaf, item, inode->i_mode);
682 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
683
684 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
685 inode->i_atime.tv_sec);
686 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
687 inode->i_atime.tv_nsec);
688
689 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
690 inode->i_mtime.tv_sec);
691 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
692 inode->i_mtime.tv_nsec);
693
694 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
695 inode->i_ctime.tv_sec);
696 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
697 inode->i_ctime.tv_nsec);
698
699 btrfs_set_inode_nblocks(leaf, item, inode->i_blocks);
700 btrfs_set_inode_generation(leaf, item, inode->i_generation);
701 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
702 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
703 btrfs_set_inode_block_group(leaf, item,
704 BTRFS_I(inode)->block_group->key.objectid);
705 }
706
707 int btrfs_update_inode(struct btrfs_trans_handle *trans,
708 struct btrfs_root *root,
709 struct inode *inode)
710 {
711 struct btrfs_inode_item *inode_item;
712 struct btrfs_path *path;
713 struct extent_buffer *leaf;
714 int ret;
715
716 path = btrfs_alloc_path();
717 BUG_ON(!path);
718 ret = btrfs_lookup_inode(trans, root, path,
719 &BTRFS_I(inode)->location, 1);
720 if (ret) {
721 if (ret > 0)
722 ret = -ENOENT;
723 goto failed;
724 }
725
726 leaf = path->nodes[0];
727 inode_item = btrfs_item_ptr(leaf, path->slots[0],
728 struct btrfs_inode_item);
729
730 fill_inode_item(leaf, inode_item, inode);
731 btrfs_mark_buffer_dirty(leaf);
732 btrfs_set_inode_last_trans(trans, inode);
733 ret = 0;
734 failed:
735 btrfs_release_path(root, path);
736 btrfs_free_path(path);
737 return ret;
738 }
739
740
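/*
 * remove a name from a directory: delete the dir item and the dir
 * index item, drop the inode ref, then update the directory size,
 * the timestamps and the victim inode's link count
 */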
741 static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
742 struct btrfs_root *root,
743 struct inode *dir,
744 struct dentry *dentry)
745 {
746 struct btrfs_path *path;
747 const char *name = dentry->d_name.name;
748 int name_len = dentry->d_name.len;
749 int ret = 0;
750 struct extent_buffer *leaf;
751 struct btrfs_dir_item *di;
752 struct btrfs_key key;
753
754 path = btrfs_alloc_path();
755 if (!path) {
756 ret = -ENOMEM;
757 goto err;
758 }
759
760 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
761 name, name_len, -1);
762 if (IS_ERR(di)) {
763 ret = PTR_ERR(di);
764 goto err;
765 }
766 if (!di) {
767 ret = -ENOENT;
768 goto err;
769 }
770 leaf = path->nodes[0];
771 btrfs_dir_item_key_to_cpu(leaf, di, &key);
772 ret = btrfs_delete_one_dir_name(trans, root, path, di);
773 if (ret)
774 goto err;
775 btrfs_release_path(root, path);
776
777 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
778 key.objectid, name, name_len, -1);
779 if (IS_ERR(di)) {
780 ret = PTR_ERR(di);
781 goto err;
782 }
783 if (!di) {
784 ret = -ENOENT;
785 goto err;
786 }
787 ret = btrfs_delete_one_dir_name(trans, root, path, di);
788
789 dentry->d_inode->i_ctime = dir->i_ctime;
790 ret = btrfs_del_inode_ref(trans, root, name, name_len,
791 dentry->d_inode->i_ino,
792 dentry->d_parent->d_inode->i_ino);
793 if (ret) {
794 printk("failed to delete reference to %.*s, "
795 "inode %lu parent %lu\n", name_len, name,
796 dentry->d_inode->i_ino,
797 dentry->d_parent->d_inode->i_ino);
798 }
799 err:
800 btrfs_free_path(path);
801 if (!ret) {
802 dir->i_size -= name_len * 2;
803 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
804 btrfs_update_inode(trans, root, dir);
805 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
806 dentry->d_inode->i_nlink--;
807 #else
808 drop_nlink(dentry->d_inode);
809 #endif
810 ret = btrfs_update_inode(trans, root, dentry->d_inode);
811 dir->i_sb->s_dirt = 1;
812 }
813 return ret;
814 }
815
816 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
817 {
818 struct btrfs_root *root;
819 struct btrfs_trans_handle *trans;
820 struct inode *inode = dentry->d_inode;
821 int ret;
822 unsigned long nr = 0;
823
824 root = BTRFS_I(dir)->root;
825 mutex_lock(&root->fs_info->fs_mutex);
826
827 ret = btrfs_check_free_space(root, 1, 1);
828 if (ret)
829 goto fail;
830
831 trans = btrfs_start_transaction(root, 1);
832
833 btrfs_set_trans_block_group(trans, dir);
834 ret = btrfs_unlink_trans(trans, root, dir, dentry);
835 nr = trans->blocks_used;
836
837 if (inode->i_nlink == 0) {
838 int found;
839 /* if the inode isn't linked anywhere,
840 * we don't need to worry about
841 * data=ordered
842 */
843 found = btrfs_del_ordered_inode(inode);
844 if (found == 1) {
845 atomic_dec(&inode->i_count);
846 }
847 }
848
849 btrfs_end_transaction(trans, root);
850 fail:
851 mutex_unlock(&root->fs_info->fs_mutex);
852 btrfs_btree_balance_dirty(root, nr);
853 btrfs_throttle(root);
854 return ret;
855 }
856
857 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
858 {
859 struct inode *inode = dentry->d_inode;
860 int err = 0;
861 int ret;
862 struct btrfs_root *root = BTRFS_I(dir)->root;
863 struct btrfs_trans_handle *trans;
864 unsigned long nr = 0;
865
866 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
867 return -ENOTEMPTY;
868
869 mutex_lock(&root->fs_info->fs_mutex);
870 ret = btrfs_check_free_space(root, 1, 1);
871 if (ret)
872 goto fail;
873
874 trans = btrfs_start_transaction(root, 1);
875 btrfs_set_trans_block_group(trans, dir);
876
877 /* now the directory is empty */
878 err = btrfs_unlink_trans(trans, root, dir, dentry);
879 if (!err) {
880 inode->i_size = 0;
881 }
882
883 nr = trans->blocks_used;
884 ret = btrfs_end_transaction(trans, root);
885 fail:
886 mutex_unlock(&root->fs_info->fs_mutex);
887 btrfs_btree_balance_dirty(root, nr);
888 btrfs_throttle(root);
889
890 if (ret && !err)
891 err = ret;
892 return err;
893 }
894
895 /*
896 * this can truncate away extent items, csum items and directory items.
897 * It starts at a high offset and removes keys until it can't find
898 * any higher than i_size.
899 *
900 * csum items that cross the new i_size are truncated to the new size
901 * as well.
902 */
903 static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
904 struct btrfs_root *root,
905 struct inode *inode,
906 u32 min_type)
907 {
908 int ret;
909 struct btrfs_path *path;
910 struct btrfs_key key;
911 struct btrfs_key found_key;
912 u32 found_type;
913 struct extent_buffer *leaf;
914 struct btrfs_file_extent_item *fi;
915 u64 extent_start = 0;
916 u64 extent_num_bytes = 0;
917 u64 item_end = 0;
918 u64 root_gen = 0;
919 u64 root_owner = 0;
920 int found_extent;
921 int del_item;
922 int pending_del_nr = 0;
923 int pending_del_slot = 0;
924 int extent_type = -1;
925 u64 mask = root->sectorsize - 1;
926
927 btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1);
928 path = btrfs_alloc_path();
929 path->reada = -1;
930 BUG_ON(!path);
931
932 /* FIXME, add redo link to tree so we don't leak on crash */
933 key.objectid = inode->i_ino;
934 key.offset = (u64)-1;
935 key.type = (u8)-1;
936
937 btrfs_init_path(path);
938 search_again:
939 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
940 if (ret < 0) {
941 goto error;
942 }
943 if (ret > 0) {
944 BUG_ON(path->slots[0] == 0);
945 path->slots[0]--;
946 }
947
948 while(1) {
949 fi = NULL;
950 leaf = path->nodes[0];
951 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
952 found_type = btrfs_key_type(&found_key);
953
954 if (found_key.objectid != inode->i_ino)
955 break;
956
957 if (found_type < min_type)
958 break;
959
960 item_end = found_key.offset;
961 if (found_type == BTRFS_EXTENT_DATA_KEY) {
962 fi = btrfs_item_ptr(leaf, path->slots[0],
963 struct btrfs_file_extent_item);
964 extent_type = btrfs_file_extent_type(leaf, fi);
965 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
966 item_end +=
967 btrfs_file_extent_num_bytes(leaf, fi);
968 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
969 struct btrfs_item *item = btrfs_item_nr(leaf,
970 path->slots[0]);
971 item_end += btrfs_file_extent_inline_len(leaf,
972 item);
973 }
974 item_end--;
975 }
976 if (found_type == BTRFS_CSUM_ITEM_KEY) {
977 ret = btrfs_csum_truncate(trans, root, path,
978 inode->i_size);
979 BUG_ON(ret);
980 }
981 if (item_end < inode->i_size) {
982 if (found_type == BTRFS_DIR_ITEM_KEY) {
983 found_type = BTRFS_INODE_ITEM_KEY;
984 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
985 found_type = BTRFS_CSUM_ITEM_KEY;
986 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
987 found_type = BTRFS_XATTR_ITEM_KEY;
988 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
989 found_type = BTRFS_INODE_REF_KEY;
990 } else if (found_type) {
991 found_type--;
992 } else {
993 break;
994 }
995 btrfs_set_key_type(&key, found_type);
996 goto next;
997 }
998 if (found_key.offset >= inode->i_size)
999 del_item = 1;
1000 else
1001 del_item = 0;
1002 found_extent = 0;
1003
1004 /* FIXME, shrink the extent if the ref count is only 1 */
1005 if (found_type != BTRFS_EXTENT_DATA_KEY)
1006 goto delete;
1007
1008 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
1009 u64 num_dec;
1010 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
1011 if (!del_item) {
1012 u64 orig_num_bytes =
1013 btrfs_file_extent_num_bytes(leaf, fi);
1014 extent_num_bytes = inode->i_size -
1015 found_key.offset + root->sectorsize - 1;
1016 extent_num_bytes = extent_num_bytes &
1017 ~((u64)root->sectorsize - 1);
1018 btrfs_set_file_extent_num_bytes(leaf, fi,
1019 extent_num_bytes);
1020 num_dec = (orig_num_bytes -
1021 extent_num_bytes);
1022 if (extent_start != 0)
1023 dec_i_blocks(inode, num_dec);
1024 btrfs_mark_buffer_dirty(leaf);
1025 } else {
1026 extent_num_bytes =
1027 btrfs_file_extent_disk_num_bytes(leaf,
1028 fi);
1029 /* FIXME blocksize != 4096 */
1030 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
1031 if (extent_start != 0) {
1032 found_extent = 1;
1033 dec_i_blocks(inode, num_dec);
1034 }
1035 root_gen = btrfs_header_generation(leaf);
1036 root_owner = btrfs_header_owner(leaf);
1037 }
1038 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1039 if (!del_item) {
1040 u32 newsize = inode->i_size - found_key.offset;
1041 dec_i_blocks(inode, item_end + 1 -
1042 found_key.offset - newsize);
1043 newsize =
1044 btrfs_file_extent_calc_inline_size(newsize);
1045 ret = btrfs_truncate_item(trans, root, path,
1046 newsize, 1);
1047 BUG_ON(ret);
1048 } else {
1049 dec_i_blocks(inode, item_end + 1 -
1050 found_key.offset);
1051 }
1052 }
1053 delete:
1054 if (del_item) {
1055 if (!pending_del_nr) {
1056 /* no pending yet, add ourselves */
1057 pending_del_slot = path->slots[0];
1058 pending_del_nr = 1;
1059 } else if (pending_del_nr &&
1060 path->slots[0] + 1 == pending_del_slot) {
1061 /* hop on the pending chunk */
1062 pending_del_nr++;
1063 pending_del_slot = path->slots[0];
1064 } else {
1065 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
1066 }
1067 } else {
1068 break;
1069 }
1070 if (found_extent) {
1071 ret = btrfs_free_extent(trans, root, extent_start,
1072 extent_num_bytes,
1073 root_owner,
1074 root_gen, inode->i_ino,
1075 found_key.offset, 0);
1076 BUG_ON(ret);
1077 }
1078 next:
1079 if (path->slots[0] == 0) {
1080 if (pending_del_nr)
1081 goto del_pending;
1082 btrfs_release_path(root, path);
1083 goto search_again;
1084 }
1085
1086 path->slots[0]--;
1087 if (pending_del_nr &&
1088 path->slots[0] + 1 != pending_del_slot) {
1089 struct btrfs_key debug;
1090 del_pending:
1091 btrfs_item_key_to_cpu(path->nodes[0], &debug,
1092 pending_del_slot);
1093 ret = btrfs_del_items(trans, root, path,
1094 pending_del_slot,
1095 pending_del_nr);
1096 BUG_ON(ret);
1097 pending_del_nr = 0;
1098 btrfs_release_path(root, path);
1099 goto search_again;
1100 }
1101 }
1102 ret = 0;
1103 error:
1104 if (pending_del_nr) {
1105 ret = btrfs_del_items(trans, root, path, pending_del_slot,
1106 pending_del_nr);
1107 }
1108 btrfs_release_path(root, path);
1109 btrfs_free_path(path);
1110 inode->i_sb->s_dirt = 1;
1111 return ret;
1112 }
1113
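/*
 * mark a single locked page delalloc and zero it from zero_start to
 * the end of the page, dirtying the page so writeback will cow it
 */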
1114 static int btrfs_cow_one_page(struct inode *inode, struct page *page,
1115 size_t zero_start)
1116 {
1117 char *kaddr;
1118 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1119 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1120 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1121 int ret = 0;
1122
1123 WARN_ON(!PageLocked(page));
1124 set_page_extent_mapped(page);
1125
1126 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
1127 set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
1128 page_end, GFP_NOFS);
1129
1130 if (zero_start != PAGE_CACHE_SIZE) {
1131 kaddr = kmap(page);
1132 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
1133 flush_dcache_page(page);
1134 kunmap(page);
1135 }
1136 set_page_dirty(page);
1137 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
1138
1139 return ret;
1140 }
1141
1142 /*
1143 * taken from block_truncate_page, but does cow as it zeros out
1144 * any bytes left in the last page in the file.
1145 */
1146 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
1147 {
1148 struct inode *inode = mapping->host;
1149 struct btrfs_root *root = BTRFS_I(inode)->root;
1150 u32 blocksize = root->sectorsize;
1151 pgoff_t index = from >> PAGE_CACHE_SHIFT;
1152 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1153 struct page *page;
1154 int ret = 0;
1155 u64 page_start;
1156
1157 if ((offset & (blocksize - 1)) == 0)
1158 goto out;
1159
1160 ret = -ENOMEM;
1161 page = grab_cache_page(mapping, index);
1162 if (!page)
1163 goto out;
1164 if (!PageUptodate(page)) {
1165 ret = btrfs_readpage(NULL, page);
1166 lock_page(page);
1167 if (!PageUptodate(page)) {
1168 ret = -EIO;
1169 goto out;
1170 }
1171 }
1172 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1173
1174 ret = btrfs_cow_one_page(inode, page, offset);
1175
1176 unlock_page(page);
1177 page_cache_release(page);
1178 out:
1179 return ret;
1180 }
1181
1182 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
1183 {
1184 struct inode *inode = dentry->d_inode;
1185 int err;
1186
1187 err = inode_change_ok(inode, attr);
1188 if (err)
1189 return err;
1190
1191 if (S_ISREG(inode->i_mode) &&
1192 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
1193 struct btrfs_trans_handle *trans;
1194 struct btrfs_root *root = BTRFS_I(inode)->root;
1195 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1196
1197 u64 mask = root->sectorsize - 1;
1198 u64 hole_start = (inode->i_size + mask) & ~mask;
1199 u64 block_end = (attr->ia_size + mask) & ~mask;
1200 u64 hole_size;
1201 u64 alloc_hint = 0;
1202
1203 if (attr->ia_size <= hole_start)
1204 goto out;
1205
1206 mutex_lock(&root->fs_info->fs_mutex);
1207 err = btrfs_check_free_space(root, 1, 0);
1208 mutex_unlock(&root->fs_info->fs_mutex);
1209 if (err)
1210 goto fail;
1211
1212 btrfs_truncate_page(inode->i_mapping, inode->i_size);
1213
1214 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1215 hole_size = block_end - hole_start;
1216
1217 mutex_lock(&root->fs_info->fs_mutex);
1218 trans = btrfs_start_transaction(root, 1);
1219 btrfs_set_trans_block_group(trans, inode);
1220 err = btrfs_drop_extents(trans, root, inode,
1221 hole_start, block_end, hole_start,
1222 &alloc_hint);
1223
1224 if (alloc_hint != EXTENT_MAP_INLINE) {
1225 err = btrfs_insert_file_extent(trans, root,
1226 inode->i_ino,
1227 hole_start, 0, 0,
1228 hole_size);
1229 btrfs_drop_extent_cache(inode, hole_start,
1230 (u64)-1);
1231 btrfs_check_file(root, inode);
1232 }
1233 btrfs_end_transaction(trans, root);
1234 mutex_unlock(&root->fs_info->fs_mutex);
1235 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1236 if (err)
1237 return err;
1238 }
1239 out:
1240 err = inode_setattr(inode, attr);
1241 fail:
1242 return err;
1243 }
1244
1245 void btrfs_put_inode(struct inode *inode)
1246 {
1247 int ret;
1248
1249 if (!BTRFS_I(inode)->ordered_trans) {
1250 return;
1251 }
1252
1253 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) ||
1254 mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
1255 return;
1256
1257 ret = btrfs_del_ordered_inode(inode);
1258 if (ret == 1) {
1259 atomic_dec(&inode->i_count);
1260 }
1261 }
1262
1263 void btrfs_delete_inode(struct inode *inode)
1264 {
1265 struct btrfs_trans_handle *trans;
1266 struct btrfs_root *root = BTRFS_I(inode)->root;
1267 unsigned long nr;
1268 int ret;
1269
1270 truncate_inode_pages(&inode->i_data, 0);
1271 if (is_bad_inode(inode)) {
1272 goto no_delete;
1273 }
1274
1275 inode->i_size = 0;
1276 mutex_lock(&root->fs_info->fs_mutex);
1277 trans = btrfs_start_transaction(root, 1);
1278
1279 btrfs_set_trans_block_group(trans, inode);
1280 ret = btrfs_truncate_in_trans(trans, root, inode, 0);
1281 if (ret)
1282 goto no_delete_lock;
1283
1284 nr = trans->blocks_used;
1285 clear_inode(inode);
1286
1287 btrfs_end_transaction(trans, root);
1288 mutex_unlock(&root->fs_info->fs_mutex);
1289 btrfs_btree_balance_dirty(root, nr);
1290 btrfs_throttle(root);
1291 return;
1292
1293 no_delete_lock:
1294 nr = trans->blocks_used;
1295 btrfs_end_transaction(trans, root);
1296 mutex_unlock(&root->fs_info->fs_mutex);
1297 btrfs_btree_balance_dirty(root, nr);
1298 btrfs_throttle(root);
1299 no_delete:
1300 clear_inode(inode);
1301 }
1302
1303 /*
1304 * this returns the key found in the dir entry in the location pointer.
1305 * If no dir entries were found, location->objectid is 0.
1306 */
1307 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
1308 struct btrfs_key *location)
1309 {
1310 const char *name = dentry->d_name.name;
1311 int namelen = dentry->d_name.len;
1312 struct btrfs_dir_item *di;
1313 struct btrfs_path *path;
1314 struct btrfs_root *root = BTRFS_I(dir)->root;
1315 int ret = 0;
1316
1317 if (namelen == 1 && strcmp(name, ".") == 0) {
1318 location->objectid = dir->i_ino;
1319 location->type = BTRFS_INODE_ITEM_KEY;
1320 location->offset = 0;
1321 return 0;
1322 }
1323 path = btrfs_alloc_path();
1324 BUG_ON(!path);
1325
1326 if (namelen == 2 && strcmp(name, "..") == 0) {
1327 struct btrfs_key key;
1328 struct extent_buffer *leaf;
1329 u32 nritems;
1330 int slot;
1331
1332 key.objectid = dir->i_ino;
1333 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1334 key.offset = 0;
1335 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1336 BUG_ON(ret == 0);
1337 ret = 0;
1338
1339 leaf = path->nodes[0];
1340 slot = path->slots[0];
1341 nritems = btrfs_header_nritems(leaf);
1342 if (slot >= nritems)
1343 goto out_err;
1344
1345 btrfs_item_key_to_cpu(leaf, &key, slot);
1346 if (key.objectid != dir->i_ino ||
1347 key.type != BTRFS_INODE_REF_KEY) {
1348 goto out_err;
1349 }
1350 location->objectid = key.offset;
1351 location->type = BTRFS_INODE_ITEM_KEY;
1352 location->offset = 0;
1353 goto out;
1354 }
1355
1356 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
1357 namelen, 0);
1358 if (IS_ERR(di))
1359 ret = PTR_ERR(di);
1360 if (!di || IS_ERR(di)) {
1361 goto out_err;
1362 }
1363 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
1364 out:
1365 btrfs_free_path(path);
1366 return ret;
1367 out_err:
1368 location->objectid = 0;
1369 goto out;
1370 }
1371
1372 /*
1373 * when we hit a tree root in a directory, the btrfs part of the inode
1374 * needs to be changed to reflect the root directory of the tree root. This
1375 * is kind of like crossing a mount point.
1376 */
1377 static int fixup_tree_root_location(struct btrfs_root *root,
1378 struct btrfs_key *location,
1379 struct btrfs_root **sub_root,
1380 struct dentry *dentry)
1381 {
1382 struct btrfs_path *path;
1383 struct btrfs_root_item *ri;
1384
1385 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
1386 return 0;
1387 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1388 return 0;
1389
1390 path = btrfs_alloc_path();
1391 BUG_ON(!path);
1392 mutex_lock(&root->fs_info->fs_mutex);
1393
1394 *sub_root = btrfs_read_fs_root(root->fs_info, location,
1395 dentry->d_name.name,
1396 dentry->d_name.len);
1397 if (IS_ERR(*sub_root))
1398 return PTR_ERR(*sub_root);
1399
1400 ri = &(*sub_root)->root_item;
1401 location->objectid = btrfs_root_dirid(ri);
1402 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1403 location->offset = 0;
1404
1405 btrfs_free_path(path);
1406 mutex_unlock(&root->fs_info->fs_mutex);
1407 return 0;
1408 }
1409
1410 static int btrfs_init_locked_inode(struct inode *inode, void *p)
1411 {
1412 struct btrfs_iget_args *args = p;
1413 inode->i_ino = args->ino;
1414 BTRFS_I(inode)->root = args->root;
1415 BTRFS_I(inode)->delalloc_bytes = 0;
1416 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1417 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1418 inode->i_mapping, GFP_NOFS);
1419 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1420 inode->i_mapping, GFP_NOFS);
1421 return 0;
1422 }
1423
1424 static int btrfs_find_actor(struct inode *inode, void *opaque)
1425 {
1426 struct btrfs_iget_args *args = opaque;
1427 return (args->ino == inode->i_ino &&
1428 args->root == BTRFS_I(inode)->root);
1429 }
1430
1431 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
1432 u64 root_objectid)
1433 {
1434 struct btrfs_iget_args args;
1435 args.ino = objectid;
1436 args.root = btrfs_lookup_fs_root(btrfs_sb(s)->fs_info, root_objectid);
1437
1438 if (!args.root)
1439 return NULL;
1440
1441 return ilookup5(s, objectid, btrfs_find_actor, (void *)&args);
1442 }
1443
1444 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
1445 struct btrfs_root *root)
1446 {
1447 struct inode *inode;
1448 struct btrfs_iget_args args;
1449 args.ino = objectid;
1450 args.root = root;
1451
1452 inode = iget5_locked(s, objectid, btrfs_find_actor,
1453 btrfs_init_locked_inode,
1454 (void *)&args);
1455 return inode;
1456 }
1457
1458 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
1459 struct nameidata *nd)
1460 {
1461 struct inode * inode;
1462 struct btrfs_inode *bi = BTRFS_I(dir);
1463 struct btrfs_root *root = bi->root;
1464 struct btrfs_root *sub_root = root;
1465 struct btrfs_key location;
1466 int ret;
1467
1468 if (dentry->d_name.len > BTRFS_NAME_LEN)
1469 return ERR_PTR(-ENAMETOOLONG);
1470
1471 mutex_lock(&root->fs_info->fs_mutex);
1472 ret = btrfs_inode_by_name(dir, dentry, &location);
1473 mutex_unlock(&root->fs_info->fs_mutex);
1474
1475 if (ret < 0)
1476 return ERR_PTR(ret);
1477
1478 inode = NULL;
1479 if (location.objectid) {
1480 ret = fixup_tree_root_location(root, &location, &sub_root,
1481 dentry);
1482 if (ret < 0)
1483 return ERR_PTR(ret);
1484 if (ret > 0)
1485 return ERR_PTR(-ENOENT);
1486 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
1487 sub_root);
1488 if (!inode)
1489 return ERR_PTR(-EACCES);
1490 if (inode->i_state & I_NEW) {
1491 /* the inode and parent dir are two different roots */
1492 if (sub_root != root) {
1493 igrab(inode);
1494 sub_root->inode = inode;
1495 }
1496 BTRFS_I(inode)->root = sub_root;
1497 memcpy(&BTRFS_I(inode)->location, &location,
1498 sizeof(location));
1499 btrfs_read_locked_inode(inode);
1500 unlock_new_inode(inode);
1501 }
1502 }
1503 return d_splice_alias(inode, dentry);
1504 }
1505
1506 static unsigned char btrfs_filetype_table[] = {
1507 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
1508 };
1509
1510 static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1511 {
1512 struct inode *inode = filp->f_dentry->d_inode;
1513 struct btrfs_root *root = BTRFS_I(inode)->root;
1514 struct btrfs_item *item;
1515 struct btrfs_dir_item *di;
1516 struct btrfs_key key;
1517 struct btrfs_key found_key;
1518 struct btrfs_path *path;
1519 int ret;
1520 u32 nritems;
1521 struct extent_buffer *leaf;
1522 int slot;
1523 int advance;
1524 unsigned char d_type;
1525 int over = 0;
1526 u32 di_cur;
1527 u32 di_total;
1528 u32 di_len;
1529 int key_type = BTRFS_DIR_INDEX_KEY;
1530 char tmp_name[32];
1531 char *name_ptr;
1532 int name_len;
1533
1534 /* FIXME, use a real flag for deciding about the key type */
1535 if (root->fs_info->tree_root == root)
1536 key_type = BTRFS_DIR_ITEM_KEY;
1537
1538 /* special case for "." */
1539 if (filp->f_pos == 0) {
1540 over = filldir(dirent, ".", 1,
1541 1, inode->i_ino,
1542 DT_DIR);
1543 if (over)
1544 return 0;
1545 filp->f_pos = 1;
1546 }
1547
1548 mutex_lock(&root->fs_info->fs_mutex);
1549 key.objectid = inode->i_ino;
1550 path = btrfs_alloc_path();
1551 path->reada = 2;
1552
1553 /* special case for .., just use the back ref */
1554 if (filp->f_pos == 1) {
1555 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1556 key.offset = 0;
1557 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1558 BUG_ON(ret == 0);
1559 leaf = path->nodes[0];
1560 slot = path->slots[0];
1561 nritems = btrfs_header_nritems(leaf);
1562 if (slot >= nritems) {
1563 btrfs_release_path(root, path);
1564 goto read_dir_items;
1565 }
1566 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1567 btrfs_release_path(root, path);
1568 if (found_key.objectid != key.objectid ||
1569 found_key.type != BTRFS_INODE_REF_KEY)
1570 goto read_dir_items;
1571 over = filldir(dirent, "..", 2,
1572 2, found_key.offset, DT_DIR);
1573 if (over)
1574 goto nopos;
1575 filp->f_pos = 2;
1576 }
1577
1578 read_dir_items:
1579 btrfs_set_key_type(&key, key_type);
1580 key.offset = filp->f_pos;
1581
1582 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1583 if (ret < 0)
1584 goto err;
1585 advance = 0;
1586 while(1) {
1587 leaf = path->nodes[0];
1588 nritems = btrfs_header_nritems(leaf);
1589 slot = path->slots[0];
1590 if (advance || slot >= nritems) {
1591 if (slot >= nritems -1) {
1592 ret = btrfs_next_leaf(root, path);
1593 if (ret)
1594 break;
1595 leaf = path->nodes[0];
1596 nritems = btrfs_header_nritems(leaf);
1597 slot = path->slots[0];
1598 } else {
1599 slot++;
1600 path->slots[0]++;
1601 }
1602 }
1603 advance = 1;
1604 item = btrfs_item_nr(leaf, slot);
1605 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1606
1607 if (found_key.objectid != key.objectid)
1608 break;
1609 if (btrfs_key_type(&found_key) != key_type)
1610 break;
1611 if (found_key.offset < filp->f_pos)
1612 continue;
1613
1614 filp->f_pos = found_key.offset;
1615 advance = 1;
1616 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
1617 di_cur = 0;
1618 di_total = btrfs_item_size(leaf, item);
1619 while(di_cur < di_total) {
1620 struct btrfs_key location;
1621
1622 name_len = btrfs_dir_name_len(leaf, di);
1623 if (name_len < 32) {
1624 name_ptr = tmp_name;
1625 } else {
1626 name_ptr = kmalloc(name_len, GFP_NOFS);
1627 BUG_ON(!name_ptr);
1628 }
1629 read_extent_buffer(leaf, name_ptr,
1630 (unsigned long)(di + 1), name_len);
1631
1632 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
1633 btrfs_dir_item_key_to_cpu(leaf, di, &location);
1634 over = filldir(dirent, name_ptr, name_len,
1635 found_key.offset,
1636 location.objectid,
1637 d_type);
1638
1639 if (name_ptr != tmp_name)
1640 kfree(name_ptr);
1641
1642 if (over)
1643 goto nopos;
1644 di_len = btrfs_dir_name_len(leaf, di) +
1645 btrfs_dir_data_len(leaf, di) +sizeof(*di);
1646 di_cur += di_len;
1647 di = (struct btrfs_dir_item *)((char *)di + di_len);
1648 }
1649 }
1650 if (key_type == BTRFS_DIR_INDEX_KEY)
1651 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
1652 else
1653 filp->f_pos++;
1654 nopos:
1655 ret = 0;
1656 err:
1657 btrfs_release_path(root, path);
1658 btrfs_free_path(path);
1659 mutex_unlock(&root->fs_info->fs_mutex);
1660 return ret;
1661 }
1662
1663 int btrfs_write_inode(struct inode *inode, int wait)
1664 {
1665 struct btrfs_root *root = BTRFS_I(inode)->root;
1666 struct btrfs_trans_handle *trans;
1667 int ret = 0;
1668
1669 if (wait) {
1670 mutex_lock(&root->fs_info->fs_mutex);
1671 trans = btrfs_start_transaction(root, 1);
1672 btrfs_set_trans_block_group(trans, inode);
1673 ret = btrfs_commit_transaction(trans, root);
1674 mutex_unlock(&root->fs_info->fs_mutex);
1675 }
1676 return ret;
1677 }
1678
1679 /*
1680 * This is somewhat expensive, updating the tree every time the
1681 * inode changes. But, it is most likely to find the inode in cache.
1682 * FIXME, needs more benchmarking...there are no reasons other than performance
1683 * to keep or drop this code.
1684 */
1685 void btrfs_dirty_inode(struct inode *inode)
1686 {
1687 struct btrfs_root *root = BTRFS_I(inode)->root;
1688 struct btrfs_trans_handle *trans;
1689
1690 mutex_lock(&root->fs_info->fs_mutex);
1691 trans = btrfs_start_transaction(root, 1);
1692 btrfs_set_trans_block_group(trans, inode);
1693 btrfs_update_inode(trans, root, inode);
1694 btrfs_end_transaction(trans, root);
1695 mutex_unlock(&root->fs_info->fs_mutex);
1696 }
1697
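/*
 * create a new inode: insert the inode item and the inode backref to
 * the parent directory in one btree operation, pick a metadata block
 * group for it and fill in the vfs inode fields
 */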
1698 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
1699 struct btrfs_root *root,
1700 const char *name, int name_len,
1701 u64 ref_objectid,
1702 u64 objectid,
1703 struct btrfs_block_group_cache *group,
1704 int mode)
1705 {
1706 struct inode *inode;
1707 struct btrfs_inode_item *inode_item;
1708 struct btrfs_block_group_cache *new_inode_group;
1709 struct btrfs_key *location;
1710 struct btrfs_path *path;
1711 struct btrfs_inode_ref *ref;
1712 struct btrfs_key key[2];
1713 u32 sizes[2];
1714 unsigned long ptr;
1715 int ret;
1716 int owner;
1717
1718 path = btrfs_alloc_path();
1719 BUG_ON(!path);
1720
1721 inode = new_inode(root->fs_info->sb);
1722 if (!inode)
1723 return ERR_PTR(-ENOMEM);
1724
1725 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1726 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1727 inode->i_mapping, GFP_NOFS);
1728 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1729 inode->i_mapping, GFP_NOFS);
1730 BTRFS_I(inode)->delalloc_bytes = 0;
1731 BTRFS_I(inode)->root = root;
1732
1733 if (mode & S_IFDIR)
1734 owner = 0;
1735 else
1736 owner = 1;
1737 new_inode_group = btrfs_find_block_group(root, group, 0,
1738 BTRFS_BLOCK_GROUP_METADATA, owner);
1739 if (!new_inode_group) {
1740 printk("find_block group failed\n");
1741 new_inode_group = group;
1742 }
1743 BTRFS_I(inode)->block_group = new_inode_group;
1744 BTRFS_I(inode)->flags = 0;
1745
1746 key[0].objectid = objectid;
1747 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
1748 key[0].offset = 0;
1749
1750 key[1].objectid = objectid;
1751 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
1752 key[1].offset = ref_objectid;
1753
1754 sizes[0] = sizeof(struct btrfs_inode_item);
1755 sizes[1] = name_len + sizeof(*ref);
1756
1757 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
1758 if (ret != 0)
1759 goto fail;
1760
1761 if (objectid > root->highest_inode)
1762 root->highest_inode = objectid;
1763
1764 inode->i_uid = current->fsuid;
1765 inode->i_gid = current->fsgid;
1766 inode->i_mode = mode;
1767 inode->i_ino = objectid;
1768 inode->i_blocks = 0;
1769 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1770 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1771 struct btrfs_inode_item);
1772 fill_inode_item(path->nodes[0], inode_item, inode);
1773
1774 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
1775 struct btrfs_inode_ref);
1776 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
1777 ptr = (unsigned long)(ref + 1);
1778 write_extent_buffer(path->nodes[0], name, ptr, name_len);
1779
1780 btrfs_mark_buffer_dirty(path->nodes[0]);
1781 btrfs_free_path(path);
1782
1783 location = &BTRFS_I(inode)->location;
1784 location->objectid = objectid;
1785 location->offset = 0;
1786 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1787
1788 insert_inode_hash(inode);
1789 return inode;
1790 fail:
1791 btrfs_free_path(path);
1792 return ERR_PTR(ret);
1793 }
1794
1795 static inline u8 btrfs_inode_type(struct inode *inode)
1796 {
1797 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
1798 }
1799
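/*
 * link an inode into a directory: insert the dir item (and
 * optionally an inode backref) and bump the parent directory's size
 * and timestamps
 */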
1800 static int btrfs_add_link(struct btrfs_trans_handle *trans,
1801 struct dentry *dentry, struct inode *inode,
1802 int add_backref)
1803 {
1804 int ret;
1805 struct btrfs_key key;
1806 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
1807 struct inode *parent_inode;
1808
1809 key.objectid = inode->i_ino;
1810 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1811 key.offset = 0;
1812
1813 ret = btrfs_insert_dir_item(trans, root,
1814 dentry->d_name.name, dentry->d_name.len,
1815 dentry->d_parent->d_inode->i_ino,
1816 &key, btrfs_inode_type(inode));
1817 if (ret == 0) {
1818 if (add_backref) {
1819 ret = btrfs_insert_inode_ref(trans, root,
1820 dentry->d_name.name,
1821 dentry->d_name.len,
1822 inode->i_ino,
1823 dentry->d_parent->d_inode->i_ino);
1824 }
1825 parent_inode = dentry->d_parent->d_inode;
1826 parent_inode->i_size += dentry->d_name.len * 2;
1827 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1828 ret = btrfs_update_inode(trans, root,
1829 dentry->d_parent->d_inode);
1830 }
1831 return ret;
1832 }
1833
1834 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
1835 struct dentry *dentry, struct inode *inode,
1836 int backref)
1837 {
1838 int err = btrfs_add_link(trans, dentry, inode, backref);
1839 if (!err) {
1840 d_instantiate(dentry, inode);
1841 return 0;
1842 }
1843 if (err > 0)
1844 err = -EEXIST;
1845 return err;
1846 }
1847
1848 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
1849 int mode, dev_t rdev)
1850 {
1851 struct btrfs_trans_handle *trans;
1852 struct btrfs_root *root = BTRFS_I(dir)->root;
1853 struct inode *inode = NULL;
1854 int err;
1855 int drop_inode = 0;
1856 u64 objectid;
1857 unsigned long nr = 0;
1858
1859 if (!new_valid_dev(rdev))
1860 return -EINVAL;
1861
1862 mutex_lock(&root->fs_info->fs_mutex);
1863 err = btrfs_check_free_space(root, 1, 0);
1864 if (err)
1865 goto fail;
1866
1867 trans = btrfs_start_transaction(root, 1);
1868 btrfs_set_trans_block_group(trans, dir);
1869
1870 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1871 if (err) {
1872 err = -ENOSPC;
1873 goto out_unlock;
1874 }
1875
1876 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1877 dentry->d_name.len,
1878 dentry->d_parent->d_inode->i_ino, objectid,
1879 BTRFS_I(dir)->block_group, mode);
1880 err = PTR_ERR(inode);
1881 if (IS_ERR(inode))
1882 goto out_unlock;
1883
1884 btrfs_set_trans_block_group(trans, inode);
1885 err = btrfs_add_nondir(trans, dentry, inode, 0);
1886 if (err)
1887 drop_inode = 1;
1888 else {
1889 inode->i_op = &btrfs_special_inode_operations;
1890 init_special_inode(inode, inode->i_mode, rdev);
1891 btrfs_update_inode(trans, root, inode);
1892 }
1893 dir->i_sb->s_dirt = 1;
1894 btrfs_update_inode_block_group(trans, inode);
1895 btrfs_update_inode_block_group(trans, dir);
1896 out_unlock:
1897 nr = trans->blocks_used;
1898 btrfs_end_transaction(trans, root);
1899 fail:
1900 mutex_unlock(&root->fs_info->fs_mutex);
1901
1902 if (drop_inode) {
1903 inode_dec_link_count(inode);
1904 iput(inode);
1905 }
1906 btrfs_btree_balance_dirty(root, nr);
1907 btrfs_throttle(root);
1908 return err;
1909 }
1910
1911 static int btrfs_create(struct inode *dir, struct dentry *dentry,
1912 int mode, struct nameidata *nd)
1913 {
1914 struct btrfs_trans_handle *trans;
1915 struct btrfs_root *root = BTRFS_I(dir)->root;
1916 struct inode *inode = NULL;
1917 int err;
1918 int drop_inode = 0;
1919 unsigned long nr = 0;
1920 u64 objectid;
1921
1922 mutex_lock(&root->fs_info->fs_mutex);
1923 err = btrfs_check_free_space(root, 1, 0);
1924 if (err)
1925 goto fail;
1926 trans = btrfs_start_transaction(root, 1);
1927 btrfs_set_trans_block_group(trans, dir);
1928
1929 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1930 if (err) {
1931 err = -ENOSPC;
1932 goto out_unlock;
1933 }
1934
1935 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1936 dentry->d_name.len,
1937 dentry->d_parent->d_inode->i_ino,
1938 objectid, BTRFS_I(dir)->block_group, mode);
1939 err = PTR_ERR(inode);
1940 if (IS_ERR(inode))
1941 goto out_unlock;
1942
1943 btrfs_set_trans_block_group(trans, inode);
1944 err = btrfs_add_nondir(trans, dentry, inode, 0);
1945 if (err)
1946 drop_inode = 1;
1947 else {
1948 inode->i_mapping->a_ops = &btrfs_aops;
1949 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
1950 inode->i_fop = &btrfs_file_operations;
1951 inode->i_op = &btrfs_file_inode_operations;
1952 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1953 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1954 inode->i_mapping, GFP_NOFS);
1955 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1956 inode->i_mapping, GFP_NOFS);
1957 BTRFS_I(inode)->delalloc_bytes = 0;
1958 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
1959 }
1960 dir->i_sb->s_dirt = 1;
1961 btrfs_update_inode_block_group(trans, inode);
1962 btrfs_update_inode_block_group(trans, dir);
1963 out_unlock:
1964 nr = trans->blocks_used;
1965 btrfs_end_transaction(trans, root);
1966 fail:
1967 mutex_unlock(&root->fs_info->fs_mutex);
1968
1969 if (drop_inode) {
1970 inode_dec_link_count(inode);
1971 iput(inode);
1972 }
1973 btrfs_btree_balance_dirty(root, nr);
1974 btrfs_throttle(root);
1975 return err;
1976 }
1977
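/*
 * btrfs_link: add another name for an existing inode.  The link count is
 * bumped up front (with the 2.6.18 compatibility ifdef below), a backref'd
 * directory entry is inserted via btrfs_add_nondir(..., backref == 1), and
 * the inode item is updated so the new nlink value reaches disk.  If
 * anything fails, the extra reference is dropped again through
 * inode_dec_link_count().
 */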
1978 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
1979 struct dentry *dentry)
1980 {
1981 struct btrfs_trans_handle *trans;
1982 struct btrfs_root *root = BTRFS_I(dir)->root;
1983 struct inode *inode = old_dentry->d_inode;
1984 unsigned long nr = 0;
1985 int err;
1986 int drop_inode = 0;
1987
1988 if (inode->i_nlink == 0)
1989 return -ENOENT;
1990
1991 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1992 inode->i_nlink++;
1993 #else
1994 inc_nlink(inode);
1995 #endif
1996 mutex_lock(&root->fs_info->fs_mutex);
1997 err = btrfs_check_free_space(root, 1, 0);
1998 if (err)
1999 goto fail;
2000 trans = btrfs_start_transaction(root, 1);
2001
2002 btrfs_set_trans_block_group(trans, dir);
2003 atomic_inc(&inode->i_count);
2004 err = btrfs_add_nondir(trans, dentry, inode, 1);
2005
2006 if (err)
2007 drop_inode = 1;
2008
2009 dir->i_sb->s_dirt = 1;
2010 btrfs_update_inode_block_group(trans, dir);
2011 err = btrfs_update_inode(trans, root, inode);
2012
2013 if (err)
2014 drop_inode = 1;
2015
2016 nr = trans->blocks_used;
2017 btrfs_end_transaction(trans, root);
2018 fail:
2019 mutex_unlock(&root->fs_info->fs_mutex);
2020
2021 if (drop_inode) {
2022 inode_dec_link_count(inode);
2023 iput(inode);
2024 }
2025 btrfs_btree_balance_dirty(root, nr);
2026 btrfs_throttle(root);
2027 return err;
2028 }
2029
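/*
 * btrfs_mkdir: create a directory.  Beyond the usual objectid/inode
 * allocation this sets the directory inode and file operations, forces
 * i_size to 0 before the first inode update, and only instantiates the
 * dentry once the link into the parent has succeeded, so drop_on_err can
 * clean up a partially built inode on any error path.
 */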
2030 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2031 {
2032 struct inode *inode;
2033 struct btrfs_trans_handle *trans;
2034 struct btrfs_root *root = BTRFS_I(dir)->root;
2035 int err = 0;
2036 int drop_on_err = 0;
2037 u64 objectid;
2038 unsigned long nr = 1;
2039
2040 mutex_lock(&root->fs_info->fs_mutex);
2041 err = btrfs_check_free_space(root, 1, 0);
2042 if (err)
2043 goto out_unlock;
2044
2045 trans = btrfs_start_transaction(root, 1);
2046 if (IS_ERR(trans)) {
2047 err = PTR_ERR(trans);
2048 goto out_unlock;
2049 }
2050 
2051 btrfs_set_trans_block_group(trans, dir);
2052
2053 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
2054 if (err) {
2055 err = -ENOSPC;
2056 goto out_unlock;
2057 }
2058
2059 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
2060 dentry->d_name.len,
2061 dentry->d_parent->d_inode->i_ino, objectid,
2062 BTRFS_I(dir)->block_group, S_IFDIR | mode);
2063 if (IS_ERR(inode)) {
2064 err = PTR_ERR(inode);
2065 goto out_fail;
2066 }
2067
2068 drop_on_err = 1;
2069 inode->i_op = &btrfs_dir_inode_operations;
2070 inode->i_fop = &btrfs_dir_file_operations;
2071 btrfs_set_trans_block_group(trans, inode);
2072
2073 inode->i_size = 0;
2074 err = btrfs_update_inode(trans, root, inode);
2075 if (err)
2076 goto out_fail;
2077
2078 err = btrfs_add_link(trans, dentry, inode, 0);
2079 if (err)
2080 goto out_fail;
2081
2082 d_instantiate(dentry, inode);
2083 drop_on_err = 0;
2084 dir->i_sb->s_dirt = 1;
2085 btrfs_update_inode_block_group(trans, inode);
2086 btrfs_update_inode_block_group(trans, dir);
2087
2088 out_fail:
2089 nr = trans->blocks_used;
2090 btrfs_end_transaction(trans, root);
2091
2092 out_unlock:
2093 mutex_unlock(&root->fs_info->fs_mutex);
2094 if (drop_on_err)
2095 iput(inode);
2096 btrfs_btree_balance_dirty(root, nr);
2097 btrfs_throttle(root);
2098 return err;
2099 }
2100
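/*
 * merge_extent_mapping: reconcile a freshly built extent_map @em with an
 * @existing mapping that raced into the tree while the lock was dropped.
 * If the two describe the same on-disk blocks (or both have a matching
 * special block_start), the existing entry is removed and @em is widened
 * to cover the union of the two ranges; anything inconsistent is reported
 * and returned as -EIO.
 */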
2101 static int merge_extent_mapping(struct extent_map_tree *em_tree,
2102 struct extent_map *existing,
2103 struct extent_map *em)
2104 {
2105 u64 start_diff;
2106 u64 new_end;
2107 int ret = 0;
2108 int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE;
2109
2110 if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE)
2111 goto invalid;
2112
2113 if (!real_blocks && em->block_start != existing->block_start)
2114 goto invalid;
2115
2116 new_end = max(existing->start + existing->len, em->start + em->len);
2117
2118 if (existing->start >= em->start) {
2119 if (em->start + em->len < existing->start)
2120 goto invalid;
2121
2122 start_diff = existing->start - em->start;
2123 if (real_blocks && em->block_start + start_diff !=
2124 existing->block_start)
2125 goto invalid;
2126
2127 em->len = new_end - em->start;
2128
2129 remove_extent_mapping(em_tree, existing);
2130 /* free for the tree */
2131 free_extent_map(existing);
2132 ret = add_extent_mapping(em_tree, em);
2133
2134 } else if (em->start > existing->start) {
2135
2136 if (existing->start + existing->len < em->start)
2137 goto invalid;
2138
2139 start_diff = em->start - existing->start;
2140 if (real_blocks && existing->block_start + start_diff !=
2141 em->block_start)
2142 goto invalid;
2143
2144 remove_extent_mapping(em_tree, existing);
2145 em->block_start = existing->block_start;
2146 em->start = existing->start;
2147 em->len = new_end - existing->start;
2148 free_extent_map(existing);
2149
2150 ret = add_extent_mapping(em_tree, em);
2151 } else {
2152 goto invalid;
2153 }
2154 return ret;
2155
2156 invalid:
2157 printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n",
2158 existing->start, existing->len, existing->block_start,
2159 em->start, em->len, em->block_start);
2160 return -EIO;
2161 }
2162
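/*
 * btrfs_get_extent: the central "map file offset to extent" helper used by
 * readpage, writepage and the direct IO path.  It first consults the
 * in-memory extent_map tree; on a miss it walks the EXTENT_DATA items for
 * the inode, handling regular extents, holes (EXTENT_MAP_HOLE) and inline
 * extents (optionally copying inline data into @page), then caches the
 * result back into the tree, merging with anything that raced in.
 *
 * Typical read-only usage, modelled on btrfs_get_block() further down
 * (sketch only):
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
 *	if (!em || IS_ERR(em))
 *		goto out;
 *	... use em->start, em->len, em->block_start ...
 *	free_extent_map(em);
 */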
2163 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
2164 size_t pg_offset, u64 start, u64 len,
2165 int create)
2166 {
2167 int ret;
2168 int err = 0;
2169 u64 bytenr;
2170 u64 extent_start = 0;
2171 u64 extent_end = 0;
2172 u64 objectid = inode->i_ino;
2173 u32 found_type;
2174 struct btrfs_path *path;
2175 struct btrfs_root *root = BTRFS_I(inode)->root;
2176 struct btrfs_file_extent_item *item;
2177 struct extent_buffer *leaf;
2178 struct btrfs_key found_key;
2179 struct extent_map *em = NULL;
2180 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2181 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2182 struct btrfs_trans_handle *trans = NULL;
2183
2184 path = btrfs_alloc_path();
2185 BUG_ON(!path);
2186 mutex_lock(&root->fs_info->fs_mutex);
2187
2188 again:
2189 spin_lock(&em_tree->lock);
2190 em = lookup_extent_mapping(em_tree, start, len);
2191 spin_unlock(&em_tree->lock);
2192
2193 if (em) {
2194 if (em->start > start) {
2195 printk("get_extent lookup [%Lu %Lu] em [%Lu %Lu]\n",
2196 start, len, em->start, em->len);
2197 WARN_ON(1);
2198 }
2199 if (em->block_start == EXTENT_MAP_INLINE && page)
2200 free_extent_map(em);
2201 else
2202 goto out;
2203 }
2204 em = alloc_extent_map(GFP_NOFS);
2205 if (!em) {
2206 err = -ENOMEM;
2207 goto out;
2208 }
2209
2210 em->start = EXTENT_MAP_HOLE;
2211 em->len = (u64)-1;
2212 em->bdev = inode->i_sb->s_bdev;
2213 ret = btrfs_lookup_file_extent(trans, root, path,
2214 objectid, start, trans != NULL);
2215 if (ret < 0) {
2216 err = ret;
2217 goto out;
2218 }
2219
2220 if (ret != 0) {
2221 if (path->slots[0] == 0)
2222 goto not_found;
2223 path->slots[0]--;
2224 }
2225
2226 leaf = path->nodes[0];
2227 item = btrfs_item_ptr(leaf, path->slots[0],
2228 struct btrfs_file_extent_item);
2229 /* are we inside the extent that was found? */
2230 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2231 found_type = btrfs_key_type(&found_key);
2232 if (found_key.objectid != objectid ||
2233 found_type != BTRFS_EXTENT_DATA_KEY) {
2234 goto not_found;
2235 }
2236
2237 found_type = btrfs_file_extent_type(leaf, item);
2238 extent_start = found_key.offset;
2239 if (found_type == BTRFS_FILE_EXTENT_REG) {
2240 extent_end = extent_start +
2241 btrfs_file_extent_num_bytes(leaf, item);
2242 err = 0;
2243 if (start < extent_start || start >= extent_end) {
2244 em->start = start;
2245 if (start < extent_start) {
2246 if (start + len <= extent_start)
2247 goto not_found;
2248 em->len = extent_end - extent_start;
2249 } else {
2250 em->len = len;
2251 }
2252 goto not_found_em;
2253 }
2254 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
2255 if (bytenr == 0) {
2256 em->start = extent_start;
2257 em->len = extent_end - extent_start;
2258 em->block_start = EXTENT_MAP_HOLE;
2259 goto insert;
2260 }
2261 bytenr += btrfs_file_extent_offset(leaf, item);
2262 em->block_start = bytenr;
2263 em->start = extent_start;
2264 em->len = extent_end - extent_start;
2265 goto insert;
2266 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
2267 u64 page_start;
2268 unsigned long ptr;
2269 char *map;
2270 size_t size;
2271 size_t extent_offset;
2272 size_t copy_size;
2273
2274 size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
2275 path->slots[0]));
2276 extent_end = (extent_start + size + root->sectorsize - 1) &
2277 ~((u64)root->sectorsize - 1);
2278 if (start < extent_start || start >= extent_end) {
2279 em->start = start;
2280 if (start < extent_start) {
2281 if (start + len <= extent_start)
2282 goto not_found;
2283 em->len = extent_end - extent_start;
2284 } else {
2285 em->len = len;
2286 }
2287 goto not_found_em;
2288 }
2289 em->block_start = EXTENT_MAP_INLINE;
2290
2291 if (!page) {
2292 em->start = extent_start;
2293 em->len = size;
2294 goto out;
2295 }
2296
2297 page_start = page_offset(page) + pg_offset;
2298 extent_offset = page_start - extent_start;
2299 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
2300 size - extent_offset);
2301 em->start = extent_start + extent_offset;
2302 em->len = (copy_size + root->sectorsize - 1) &
2303 ~((u64)root->sectorsize - 1);
2304 map = kmap(page);
2305 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
2306 if (create == 0 && !PageUptodate(page)) {
2307 read_extent_buffer(leaf, map + pg_offset, ptr,
2308 copy_size);
2309 flush_dcache_page(page);
2310 } else if (create && PageUptodate(page)) {
2311 if (!trans) {
2312 kunmap(page);
2313 free_extent_map(em);
2314 em = NULL;
2315 btrfs_release_path(root, path);
2316 trans = btrfs_start_transaction(root, 1);
2317 goto again;
2318 }
2319 write_extent_buffer(leaf, map + pg_offset, ptr,
2320 copy_size);
2321 btrfs_mark_buffer_dirty(leaf);
2322 }
2323 kunmap(page);
2324 set_extent_uptodate(io_tree, em->start,
2325 extent_map_end(em) - 1, GFP_NOFS);
2326 goto insert;
2327 } else {
2328 printk("unknown found_type %d\n", found_type);
2329 WARN_ON(1);
2330 }
2331 not_found:
2332 em->start = start;
2333 em->len = len;
2334 not_found_em:
2335 em->block_start = EXTENT_MAP_HOLE;
2336 insert:
2337 btrfs_release_path(root, path);
2338 if (em->start > start || extent_map_end(em) <= start) {
2339 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n",
       em->start, em->len, start, len);
2340 err = -EIO;
2341 goto out;
2342 }
2343
2344 err = 0;
2345 spin_lock(&em_tree->lock);
2346 ret = add_extent_mapping(em_tree, em);
2347
2348 /* it is possible that someone inserted the extent into the tree
2349 * while we had the lock dropped. It is also possible that
2350 * an overlapping map exists in the tree
2351 */
2352 if (ret == -EEXIST) {
2353 struct extent_map *existing;
2354 existing = lookup_extent_mapping(em_tree, start, len);
2355 if (!existing) {
2356 existing = lookup_extent_mapping(em_tree, em->start,
2357 em->len);
2358 if (existing) {
2359 err = merge_extent_mapping(em_tree, existing,
2360 em);
2361 free_extent_map(existing);
2362 if (err) {
2363 free_extent_map(em);
2364 em = NULL;
2365 }
2366 } else {
2367 err = -EIO;
2368 printk("failing to insert %Lu %Lu\n",
2369 start, len);
2370 free_extent_map(em);
2371 em = NULL;
2372 }
2373 } else {
2374 free_extent_map(em);
2375 em = existing;
2376 }
2377 }
2378 spin_unlock(&em_tree->lock);
2379 out:
2380 btrfs_free_path(path);
2381 if (trans) {
2382 ret = btrfs_end_transaction(trans, root);
2383 if (!err)
2384 err = ret;
2385 }
2386 mutex_unlock(&root->fs_info->fs_mutex);
2387 if (err) {
2388 free_extent_map(em);
2389 WARN_ON(1);
2390 return ERR_PTR(err);
2391 }
2392 return em;
2393 }
2394
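/*
 * btrfs_get_block / btrfs_direct_IO: a minimal buffer_head bridge so that
 * blockdev_direct_IO() can be used for O_DIRECT reads.  The extent found
 * by btrfs_get_extent() is translated to a physical block with
 * btrfs_map_block() (only stripe 0 is used, so this is a read-only,
 * single-copy mapping), and direct writes are rejected with -EINVAL in
 * btrfs_direct_IO() below.
 */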
2395 static int btrfs_get_block(struct inode *inode, sector_t iblock,
2396 struct buffer_head *bh_result, int create)
2397 {
2398 struct extent_map *em;
2399 u64 start = (u64)iblock << inode->i_blkbits;
2400 struct btrfs_multi_bio *multi = NULL;
2401 struct btrfs_root *root = BTRFS_I(inode)->root;
2402 u64 len;
2403 u64 logical;
2404 u64 map_length;
2405 int ret = 0;
2406
2407 em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0);
2408
2409 if (!em || IS_ERR(em))
2410 goto out;
2411
2412 if (em->start > start || em->start + em->len <= start)
2413 goto out;
2414
2415 if (em->block_start == EXTENT_MAP_INLINE) {
2416 ret = -EINVAL;
2417 goto out;
2418 }
2419
2420 if (em->block_start == EXTENT_MAP_HOLE ||
2421 em->block_start == EXTENT_MAP_DELALLOC) {
2422 goto out;
2423 }
2424
2425 len = em->start + em->len - start;
2426 len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
2427
2428 logical = start - em->start;
2429 logical = em->block_start + logical;
2430
2431 map_length = len;
2432 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
2433 logical, &map_length, &multi, 0);
2434 BUG_ON(ret);
2435 bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits;
2436 bh_result->b_size = min(map_length, len);
2437 bh_result->b_bdev = multi->stripes[0].dev->bdev;
2438 set_buffer_mapped(bh_result);
2439 kfree(multi);
2440 out:
2441 free_extent_map(em);
2442 return ret;
2443 }
2444
2445 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
2446 const struct iovec *iov, loff_t offset,
2447 unsigned long nr_segs)
2448 {
2449 struct file *file = iocb->ki_filp;
2450 struct inode *inode = file->f_mapping->host;
2451
2452 if (rw == WRITE)
2453 return -EINVAL;
2454
2455 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
2456 offset, nr_segs, btrfs_get_block, NULL);
2457 }
2458
2459 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
2460 {
2461 return extent_bmap(mapping, iblock, btrfs_get_extent);
2462 }
2463
2464 int btrfs_readpage(struct file *file, struct page *page)
2465 {
2466 struct extent_io_tree *tree;
2467 tree = &BTRFS_I(page->mapping->host)->io_tree;
2468 return extent_read_full_page(tree, page, btrfs_get_extent);
2469 }
2470
2471 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
2472 {
2473 struct extent_io_tree *tree;
2474 
2476 if (current->flags & PF_MEMALLOC) {
2477 redirty_page_for_writepage(wbc, page);
2478 unlock_page(page);
2479 return 0;
2480 }
2481 tree = &BTRFS_I(page->mapping->host)->io_tree;
2482 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
2483 }
2484
2485 static int btrfs_writepages(struct address_space *mapping,
2486 struct writeback_control *wbc)
2487 {
2488 struct extent_io_tree *tree;
2489 tree = &BTRFS_I(mapping->host)->io_tree;
2490 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
2491 }
2492
2493 static int
2494 btrfs_readpages(struct file *file, struct address_space *mapping,
2495 struct list_head *pages, unsigned nr_pages)
2496 {
2497 struct extent_io_tree *tree;
2498 tree = &BTRFS_I(mapping->host)->io_tree;
2499 return extent_readpages(tree, mapping, pages, nr_pages,
2500 btrfs_get_extent);
2501 }
2502
2503 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
2504 {
2505 struct extent_io_tree *tree;
2506 struct extent_map_tree *map;
2507 int ret;
2508
2509 tree = &BTRFS_I(page->mapping->host)->io_tree;
2510 map = &BTRFS_I(page->mapping->host)->extent_tree;
2511 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
2512 if (ret == 1) {
2513 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
2514 ClearPagePrivate(page);
2515 set_page_private(page, 0);
2516 page_cache_release(page);
2517 }
2518 return ret;
2519 }
2520
2521 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
2522 {
2523 struct extent_io_tree *tree;
2524
2525 tree = &BTRFS_I(page->mapping->host)->io_tree;
2526 extent_invalidatepage(tree, page, offset);
2527 btrfs_releasepage(page, GFP_NOFS);
2528 if (PagePrivate(page)) {
2529 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
2530 ClearPagePrivate(page);
2531 set_page_private(page, 0);
2532 page_cache_release(page);
2533 }
2534 }
2535
2536 /*
2537 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
2538 * called from a page fault handler when a page is first dirtied. Hence we must
2539 * be careful to check for EOF conditions here. We set the page up correctly
2540 * for a written page which means we get ENOSPC checking when writing into
2541 * holes and correct delalloc and unwritten extent mapping on filesystems that
2542 * support these features.
2543 *
2544 * We are not allowed to take the i_mutex here so we have to play games to
2545 * protect against truncate races as the page could now be beyond EOF. Because
2546 * vmtruncate() writes the inode size before removing pages, once we have the
2547 * page lock we can determine safely if the page is beyond EOF. If it is not
2548 * beyond EOF, then the page is guaranteed safe against truncation until we
2549 * unlock the page.
2550 */
2551 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
2552 {
2553 struct inode *inode = fdentry(vma->vm_file)->d_inode;
2554 struct btrfs_root *root = BTRFS_I(inode)->root;
2555 unsigned long end;
2556 loff_t size;
2557 int ret;
2558 u64 page_start;
2559
2560 mutex_lock(&root->fs_info->fs_mutex);
2561 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
2562 mutex_unlock(&root->fs_info->fs_mutex);
2563 if (ret)
2564 goto out;
2565
2566 ret = -EINVAL;
2567
2568 lock_page(page);
2569 wait_on_page_writeback(page);
2570 size = i_size_read(inode);
2571 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2572
2573 if ((page->mapping != inode->i_mapping) ||
2574 (page_start > size)) {
2575 /* page got truncated out from underneath us */
2576 goto out_unlock;
2577 }
2578
2579 /* page is wholly or partially inside EOF */
2580 if (page_start + PAGE_CACHE_SIZE > size)
2581 end = size & ~PAGE_CACHE_MASK;
2582 else
2583 end = PAGE_CACHE_SIZE;
2584
2585 ret = btrfs_cow_one_page(inode, page, end);
2586
2587 out_unlock:
2588 unlock_page(page);
2589 out:
2590 return ret;
2591 }
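
/*
 * Illustrative user-space sequence that ends up in btrfs_page_mkwrite()
 * above (hypothetical file name, sketch only): the first store through a
 * shared writable mapping makes the page writable via the fault path,
 * which is where the COW and ENOSPC handling happens.
 *
 *	int fd = open("/mnt/btrfs/file", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 1;	// first write fault -> btrfs_page_mkwrite()
 */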
2592
2593 static void btrfs_truncate(struct inode *inode)
2594 {
2595 struct btrfs_root *root = BTRFS_I(inode)->root;
2596 int ret;
2597 struct btrfs_trans_handle *trans;
2598 unsigned long nr;
2599
2600 if (!S_ISREG(inode->i_mode))
2601 return;
2602 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2603 return;
2604
2605 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2606
2607 mutex_lock(&root->fs_info->fs_mutex);
2608 trans = btrfs_start_transaction(root, 1);
2609 btrfs_set_trans_block_group(trans, inode);
2610
2611 /* FIXME, add redo link to tree so we don't leak on crash */
2612 ret = btrfs_truncate_in_trans(trans, root, inode,
2613 BTRFS_EXTENT_DATA_KEY);
2614 btrfs_update_inode(trans, root, inode);
2615 nr = trans->blocks_used;
2616
2617 ret = btrfs_end_transaction(trans, root);
2618 BUG_ON(ret);
2619 mutex_unlock(&root->fs_info->fs_mutex);
2620 btrfs_btree_balance_dirty(root, nr);
2621 btrfs_throttle(root);
2622 }
2623
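/*
 * create_subvol: build a brand new subvolume from scratch.  An empty leaf
 * is allocated and stamped as the root of the new tree, a root_item
 * pointing at it is inserted into the tree of tree roots, and a directory
 * item plus inode ref make it visible under the filesystem's top-level
 * directory.  After committing, the new root is read back and its first
 * inode (the subvolume's own root directory) is created inside it.
 */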
2624 static int noinline create_subvol(struct btrfs_root *root, char *name,
2625 int namelen)
2626 {
2627 struct btrfs_trans_handle *trans;
2628 struct btrfs_key key;
2629 struct btrfs_root_item root_item;
2630 struct btrfs_inode_item *inode_item;
2631 struct extent_buffer *leaf;
2632 struct btrfs_root *new_root = root;
2633 struct inode *inode;
2634 struct inode *dir;
2635 int ret;
2636 int err;
2637 u64 objectid;
2638 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
2639 unsigned long nr = 1;
2640
2641 mutex_lock(&root->fs_info->fs_mutex);
2642 ret = btrfs_check_free_space(root, 1, 0);
2643 if (ret)
2644 goto fail_commit;
2645
2646 trans = btrfs_start_transaction(root, 1);
2647 BUG_ON(!trans);
2648
2649 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2650 0, &objectid);
2651 if (ret)
2652 goto fail;
2653
2654 leaf = __btrfs_alloc_free_block(trans, root, root->leafsize,
2655 objectid, trans->transid, 0, 0,
2656 0, 0);
2657 if (IS_ERR(leaf)) {
2658 ret = PTR_ERR(leaf);
goto fail;
}
2659
2660 btrfs_set_header_nritems(leaf, 0);
2661 btrfs_set_header_level(leaf, 0);
2662 btrfs_set_header_bytenr(leaf, leaf->start);
2663 btrfs_set_header_generation(leaf, trans->transid);
2664 btrfs_set_header_owner(leaf, objectid);
2665
2666 write_extent_buffer(leaf, root->fs_info->fsid,
2667 (unsigned long)btrfs_header_fsid(leaf),
2668 BTRFS_FSID_SIZE);
2669 btrfs_mark_buffer_dirty(leaf);
2670
2671 inode_item = &root_item.inode;
2672 memset(inode_item, 0, sizeof(*inode_item));
2673 inode_item->generation = cpu_to_le64(1);
2674 inode_item->size = cpu_to_le64(3);
2675 inode_item->nlink = cpu_to_le32(1);
2676 inode_item->nblocks = cpu_to_le64(1);
2677 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
2678
2679 btrfs_set_root_bytenr(&root_item, leaf->start);
2680 btrfs_set_root_level(&root_item, 0);
2681 btrfs_set_root_refs(&root_item, 1);
2682 btrfs_set_root_used(&root_item, 0);
2683
2684 memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
2685 root_item.drop_level = 0;
2686
2687 free_extent_buffer(leaf);
2688 leaf = NULL;
2689
2690 btrfs_set_root_dirid(&root_item, new_dirid);
2691
2692 key.objectid = objectid;
2693 key.offset = 1;
2694 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2695 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2696 &root_item);
2697 if (ret)
2698 goto fail;
2699
2700 /*
2701 * insert the directory item
2702 */
2703 key.offset = (u64)-1;
2704 dir = root->fs_info->sb->s_root->d_inode;
2705 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2706 name, namelen, dir->i_ino, &key,
2707 BTRFS_FT_DIR);
2708 if (ret)
2709 goto fail;
2710
2711 ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
2712 name, namelen, objectid,
2713 root->fs_info->sb->s_root->d_inode->i_ino);
2714 if (ret)
2715 goto fail;
2716
2717 ret = btrfs_commit_transaction(trans, root);
2718 if (ret)
2719 goto fail_commit;
2720
2721 new_root = btrfs_read_fs_root(root->fs_info, &key, name, namelen);
2722 BUG_ON(!new_root);
2723
2724 trans = btrfs_start_transaction(new_root, 1);
2725 BUG_ON(!trans);
2726
2727 inode = btrfs_new_inode(trans, new_root, "..", 2, new_dirid,
2728 new_dirid,
2729 BTRFS_I(dir)->block_group, S_IFDIR | 0700);
2730 if (IS_ERR(inode)) {
2731 ret = PTR_ERR(inode);
goto fail;
}
2732 inode->i_op = &btrfs_dir_inode_operations;
2733 inode->i_fop = &btrfs_dir_file_operations;
2734 new_root->inode = inode;
2735
2736 ret = btrfs_insert_inode_ref(trans, new_root, "..", 2, new_dirid,
2737 new_dirid);
2738 inode->i_nlink = 1;
2739 inode->i_size = 0;
2740 ret = btrfs_update_inode(trans, new_root, inode);
2741 if (ret)
2742 goto fail;
2743 fail:
2744 nr = trans->blocks_used;
2745 err = btrfs_commit_transaction(trans, new_root);
2746 if (err && !ret)
2747 ret = err;
2748 fail_commit:
2749 mutex_unlock(&root->fs_info->fs_mutex);
2750 btrfs_btree_balance_dirty(root, nr);
2751 btrfs_throttle(root);
2752 return ret;
2753 }
2754
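/*
 * create_snapshot: snapshots are not taken here directly.  A
 * btrfs_pending_snapshot carrying the name and the source root is queued
 * on the running transaction, and the actual tree copy happens when that
 * transaction commits.  Roots without ref_cows are rejected with -EINVAL
 * (the snapshot ioctl routes the tree root to create_subvol() instead).
 */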
2755 static int create_snapshot(struct btrfs_root *root, char *name, int namelen)
2756 {
2757 struct btrfs_pending_snapshot *pending_snapshot;
2758 struct btrfs_trans_handle *trans;
2759 int ret;
2760 int err;
2761 unsigned long nr = 0;
2762
2763 if (!root->ref_cows)
2764 return -EINVAL;
2765
2766 mutex_lock(&root->fs_info->fs_mutex);
2767 ret = btrfs_check_free_space(root, 1, 0);
2768 if (ret)
2769 goto fail_unlock;
2770
2771 pending_snapshot = kmalloc(sizeof(*pending_snapshot), GFP_NOFS);
2772 if (!pending_snapshot) {
2773 ret = -ENOMEM;
2774 goto fail_unlock;
2775 }
2776 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
2777 if (!pending_snapshot->name) {
2778 ret = -ENOMEM;
2779 kfree(pending_snapshot);
2780 goto fail_unlock;
2781 }
2782 memcpy(pending_snapshot->name, name, namelen);
2783 pending_snapshot->name[namelen] = '\0';
2784 trans = btrfs_start_transaction(root, 1);
2785 BUG_ON(!trans);
2786 pending_snapshot->root = root;
2787 list_add(&pending_snapshot->list,
2788 &trans->transaction->pending_snapshots);
2789 ret = btrfs_update_inode(trans, root, root->inode);
2790 err = btrfs_commit_transaction(trans, root);
2791
2792 fail_unlock:
2793 mutex_unlock(&root->fs_info->fs_mutex);
2794 btrfs_btree_balance_dirty(root, nr);
2795 btrfs_throttle(root);
2796 return ret;
2797 }
2798
2799 unsigned long btrfs_force_ra(struct address_space *mapping,
2800 struct file_ra_state *ra, struct file *file,
2801 pgoff_t offset, pgoff_t last_index)
2802 {
2803 pgoff_t req_size;
2804
2805 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2806 req_size = last_index - offset + 1;
2807 offset = page_cache_readahead(mapping, ra, file, offset, req_size);
2808 return offset;
2809 #else
2810 req_size = min(last_index - offset + 1, (pgoff_t)128);
2811 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
2812 return offset + req_size;
2813 #endif
2814 }
2815
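/*
 * btrfs_defrag_file: a simple "touch every page" defragmenter.  Each page
 * of the file is read (with readahead), locked and marked delalloc in the
 * io_tree, so the next writeback pass rewrites the data through the normal
 * COW path and has a chance to lay it out contiguously.  Dirty page
 * balancing is done as we go to keep memory pressure bounded.
 */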
2816 int btrfs_defrag_file(struct file *file)
{
2817 struct inode *inode = fdentry(file)->d_inode;
2818 struct btrfs_root *root = BTRFS_I(inode)->root;
2819 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2820 struct page *page;
2821 unsigned long last_index;
2822 unsigned long ra_index = 0;
2823 u64 page_start;
2824 u64 page_end;
2825 unsigned long i;
2826 int ret;
2827
2828 mutex_lock(&root->fs_info->fs_mutex);
2829 ret = btrfs_check_free_space(root, inode->i_size, 0);
2830 mutex_unlock(&root->fs_info->fs_mutex);
2831 if (ret)
2832 return -ENOSPC;
2833
2834 mutex_lock(&inode->i_mutex);
2835 last_index = inode->i_size >> PAGE_CACHE_SHIFT;
2836 for (i = 0; i <= last_index; i++) {
2837 if (i == ra_index) {
2838 ra_index = btrfs_force_ra(inode->i_mapping,
2839 &file->f_ra,
2840 file, ra_index, last_index);
2841 }
2842 page = grab_cache_page(inode->i_mapping, i);
2843 if (!page)
2844 goto out_unlock;
2845 if (!PageUptodate(page)) {
2846 btrfs_readpage(NULL, page);
2847 lock_page(page);
2848 if (!PageUptodate(page)) {
2849 unlock_page(page);
2850 page_cache_release(page);
2851 goto out_unlock;
2852 }
2853 }
2854 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2855 page_end = page_start + PAGE_CACHE_SIZE - 1;
2856
2857 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2858 set_extent_delalloc(io_tree, page_start,
2859 page_end, GFP_NOFS);
2860
2861 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2862 set_page_dirty(page);
2863 unlock_page(page);
2864 page_cache_release(page);
2865 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
2866 }
2867
2868 out_unlock:
2869 mutex_unlock(&inode->i_mutex);
2870 return 0;
2871 }
2872
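/*
 * btrfs_ioctl_resize: vol_args->name carries the new size as a string,
 * either "max" (grow to the size of the underlying block device) or a
 * value understood by btrfs_parse_size(), optionally prefixed with '+' or
 * '-' to grow or shrink relative to the current total.  The result is
 * rounded down to a sectorsize multiple, bounded below by 256MB and above
 * by the device size, and the extent tree is then grown or shrunk to match.
 */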
2873 static int btrfs_ioctl_resize(struct btrfs_root *root, void __user *arg)
2874 {
2875 u64 new_size;
2876 u64 old_size;
2877 struct btrfs_ioctl_vol_args *vol_args;
2878 struct btrfs_trans_handle *trans;
2879 char *sizestr;
2880 int ret = 0;
2881 int namelen;
2882 int mod = 0;
2883
2884 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
2885
2886 if (!vol_args)
2887 return -ENOMEM;
2888
2889 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
2890 ret = -EFAULT;
2891 goto out;
2892 }
2893 namelen = strlen(vol_args->name);
2894 if (namelen > BTRFS_VOL_NAME_MAX) {
2895 ret = -EINVAL;
2896 goto out;
2897 }
2898
2899 sizestr = vol_args->name;
2900 if (!strcmp(sizestr, "max"))
2901 new_size = root->fs_info->sb->s_bdev->bd_inode->i_size;
2902 else {
2903 if (sizestr[0] == '-') {
2904 mod = -1;
2905 sizestr++;
2906 } else if (sizestr[0] == '+') {
2907 mod = 1;
2908 sizestr++;
2909 }
2910 new_size = btrfs_parse_size(sizestr);
2911 if (new_size == 0) {
2912 ret = -EINVAL;
2913 goto out;
2914 }
2915 }
2916
2917 mutex_lock(&root->fs_info->fs_mutex);
2918 old_size = btrfs_super_total_bytes(&root->fs_info->super_copy);
2919
2920 if (mod < 0) {
2921 if (new_size > old_size) {
2922 ret = -EINVAL;
2923 goto out_unlock;
2924 }
2925 new_size = old_size - new_size;
2926 } else if (mod > 0) {
2927 new_size = old_size + new_size;
2928 }
2929
2930 if (new_size < 256 * 1024 * 1024) {
2931 ret = -EINVAL;
2932 goto out_unlock;
2933 }
2934 if (new_size > root->fs_info->sb->s_bdev->bd_inode->i_size) {
2935 ret = -EFBIG;
2936 goto out_unlock;
2937 }
2938
2939 do_div(new_size, root->sectorsize);
2940 new_size *= root->sectorsize;
2941
2942 printk("new size is %Lu\n", new_size);
2943 if (new_size > old_size) {
2944 trans = btrfs_start_transaction(root, 1);
2945 ret = btrfs_grow_extent_tree(trans, root, new_size);
2946 btrfs_commit_transaction(trans, root);
2947 } else {
2948 ret = btrfs_shrink_extent_tree(root, new_size);
2949 }
2950
2951 out_unlock:
2952 mutex_unlock(&root->fs_info->fs_mutex);
2953 out:
2954 kfree(vol_args);
2955 return ret;
2956 }
2957
2958 static int noinline btrfs_ioctl_snap_create(struct btrfs_root *root,
2959 void __user *arg)
2960 {
2961 struct btrfs_ioctl_vol_args *vol_args;
2962 struct btrfs_dir_item *di;
2963 struct btrfs_path *path;
2964 u64 root_dirid;
2965 int namelen;
2966 int ret;
2967
2968 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
2969
2970 if (!vol_args)
2971 return -ENOMEM;
2972
2973 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
2974 ret = -EFAULT;
2975 goto out;
2976 }
2977
2978 namelen = strlen(vol_args->name);
2979 if (namelen > BTRFS_VOL_NAME_MAX) {
2980 ret = -EINVAL;
2981 goto out;
2982 }
2983 if (strchr(vol_args->name, '/')) {
2984 ret = -EINVAL;
2985 goto out;
2986 }
2987
2988 path = btrfs_alloc_path();
2989 if (!path) {
2990 ret = -ENOMEM;
2991 goto out;
2992 }
2993
2994 root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
2995 mutex_lock(&root->fs_info->fs_mutex);
2996 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
2997 path, root_dirid,
2998 vol_args->name, namelen, 0);
2999 mutex_unlock(&root->fs_info->fs_mutex);
3000 btrfs_free_path(path);
3001
3002 if (di && !IS_ERR(di)) {
3003 ret = -EEXIST;
3004 goto out;
3005 }
3006
3007 if (IS_ERR(di)) {
3008 ret = PTR_ERR(di);
3009 goto out;
3010 }
3011
3012 if (root == root->fs_info->tree_root)
3013 ret = create_subvol(root, vol_args->name, namelen);
3014 else
3015 ret = create_snapshot(root, vol_args->name, namelen);
3016 out:
3017 kfree(vol_args);
3018 return ret;
3019 }
3020
3021 static int btrfs_ioctl_defrag(struct file *file)
3022 {
3023 struct inode *inode = fdentry(file)->d_inode;
3024 struct btrfs_root *root = BTRFS_I(inode)->root;
3025
3026 switch (inode->i_mode & S_IFMT) {
3027 case S_IFDIR:
3028 mutex_lock(&root->fs_info->fs_mutex);
3029 btrfs_defrag_root(root, 0);
3030 btrfs_defrag_root(root->fs_info->extent_root, 0);
3031 mutex_unlock(&root->fs_info->fs_mutex);
3032 break;
3033 case S_IFREG:
3034 btrfs_defrag_file(file);
3035 break;
3036 }
3037
3038 return 0;
3039 }
3040
3041 long btrfs_ioctl(struct file *file, unsigned int
3042 cmd, unsigned long arg)
3043 {
3044 struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
3045
3046 switch (cmd) {
3047 case BTRFS_IOC_SNAP_CREATE:
3048 return btrfs_ioctl_snap_create(root, (void __user *)arg);
3049 case BTRFS_IOC_DEFRAG:
3050 return btrfs_ioctl_defrag(file);
3051 case BTRFS_IOC_RESIZE:
3052 return btrfs_ioctl_resize(root, (void __user *)arg);
3053 }
3054
3055 return -ENOTTY;
3056 }
3057
3058 /*
3059 * Called inside transaction, so use GFP_NOFS
3060 */
3061 struct inode *btrfs_alloc_inode(struct super_block *sb)
3062 {
3063 struct btrfs_inode *ei;
3064
3065 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
3066 if (!ei)
3067 return NULL;
3068 ei->last_trans = 0;
3069 ei->ordered_trans = 0;
3070 return &ei->vfs_inode;
3071 }
3072
3073 void btrfs_destroy_inode(struct inode *inode)
3074 {
3075 WARN_ON(!list_empty(&inode->i_dentry));
3076 WARN_ON(inode->i_data.nrpages);
3077
3078 btrfs_drop_extent_cache(inode, 0, (u64)-1);
3079 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
3080 }
3081
3082 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
3083 static void init_once(struct kmem_cache * cachep, void *foo)
3084 #else
3085 static void init_once(void * foo, struct kmem_cache * cachep,
3086 unsigned long flags)
3087 #endif
3088 {
3089 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
3090
3091 inode_init_once(&ei->vfs_inode);
3092 }
3093
3094 void btrfs_destroy_cachep(void)
3095 {
3096 if (btrfs_inode_cachep)
3097 kmem_cache_destroy(btrfs_inode_cachep);
3098 if (btrfs_trans_handle_cachep)
3099 kmem_cache_destroy(btrfs_trans_handle_cachep);
3100 if (btrfs_transaction_cachep)
3101 kmem_cache_destroy(btrfs_transaction_cachep);
3102 if (btrfs_bit_radix_cachep)
3103 kmem_cache_destroy(btrfs_bit_radix_cachep);
3104 if (btrfs_path_cachep)
3105 kmem_cache_destroy(btrfs_path_cachep);
3106 }
3107
3108 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
3109 unsigned long extra_flags,
3110 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
3111 void (*ctor)(struct kmem_cache *, void *)
3112 #else
3113 void (*ctor)(void *, struct kmem_cache *,
3114 unsigned long)
3115 #endif
3116 )
3117 {
3118 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
3119 SLAB_MEM_SPREAD | extra_flags), ctor
3120 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
3121 ,NULL
3122 #endif
3123 );
3124 }
3125
3126 int btrfs_init_cachep(void)
3127 {
3128 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
3129 sizeof(struct btrfs_inode),
3130 0, init_once);
3131 if (!btrfs_inode_cachep)
3132 goto fail;
3133 btrfs_trans_handle_cachep =
3134 btrfs_cache_create("btrfs_trans_handle_cache",
3135 sizeof(struct btrfs_trans_handle),
3136 0, NULL);
3137 if (!btrfs_trans_handle_cachep)
3138 goto fail;
3139 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
3140 sizeof(struct btrfs_transaction),
3141 0, NULL);
3142 if (!btrfs_transaction_cachep)
3143 goto fail;
3144 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
3145 sizeof(struct btrfs_path),
3146 0, NULL);
3147 if (!btrfs_path_cachep)
3148 goto fail;
3149 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
3150 SLAB_DESTROY_BY_RCU, NULL);
3151 if (!btrfs_bit_radix_cachep)
3152 goto fail;
3153 return 0;
3154 fail:
3155 btrfs_destroy_cachep();
3156 return -ENOMEM;
3157 }
3158
3159 static int btrfs_getattr(struct vfsmount *mnt,
3160 struct dentry *dentry, struct kstat *stat)
3161 {
3162 struct inode *inode = dentry->d_inode;
3163 generic_fillattr(inode, stat);
3164 stat->blksize = PAGE_CACHE_SIZE;
3165 stat->blocks = inode->i_blocks + (BTRFS_I(inode)->delalloc_bytes >> 9);
3166 return 0;
3167 }
3168
3169 static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
3170 struct inode * new_dir,struct dentry *new_dentry)
3171 {
3172 struct btrfs_trans_handle *trans;
3173 struct btrfs_root *root = BTRFS_I(old_dir)->root;
3174 struct inode *new_inode = new_dentry->d_inode;
3175 struct inode *old_inode = old_dentry->d_inode;
3176 struct timespec ctime = CURRENT_TIME;
3177 struct btrfs_path *path;
3178 int ret;
3179
3180 if (S_ISDIR(old_inode->i_mode) && new_inode &&
3181 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
3182 return -ENOTEMPTY;
3183 }
3184
3185 mutex_lock(&root->fs_info->fs_mutex);
3186 ret = btrfs_check_free_space(root, 1, 0);
3187 if (ret)
3188 goto out_unlock;
3189
3190 trans = btrfs_start_transaction(root, 1);
3191
3192 btrfs_set_trans_block_group(trans, new_dir);
3193 path = btrfs_alloc_path();
3194 if (!path) {
3195 ret = -ENOMEM;
3196 goto out_fail;
3197 }
3198
3199 old_dentry->d_inode->i_nlink++;
3200 old_dir->i_ctime = old_dir->i_mtime = ctime;
3201 new_dir->i_ctime = new_dir->i_mtime = ctime;
3202 old_inode->i_ctime = ctime;
3203
3204 ret = btrfs_unlink_trans(trans, root, old_dir, old_dentry);
3205 if (ret)
3206 goto out_fail;
3207
3208 if (new_inode) {
3209 new_inode->i_ctime = CURRENT_TIME;
3210 ret = btrfs_unlink_trans(trans, root, new_dir, new_dentry);
3211 if (ret)
3212 goto out_fail;
3213 }
3214 ret = btrfs_add_link(trans, new_dentry, old_inode, 1);
3215 if (ret)
3216 goto out_fail;
3217
3218 out_fail:
3219 btrfs_free_path(path);
3220 btrfs_end_transaction(trans, root);
3221 out_unlock:
3222 mutex_unlock(&root->fs_info->fs_mutex);
3223 return ret;
3224 }
3225
3226 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
3227 const char *symname)
3228 {
3229 struct btrfs_trans_handle *trans;
3230 struct btrfs_root *root = BTRFS_I(dir)->root;
3231 struct btrfs_path *path;
3232 struct btrfs_key key;
3233 struct inode *inode = NULL;
3234 int err;
3235 int drop_inode = 0;
3236 u64 objectid;
3237 int name_len;
3238 int datasize;
3239 unsigned long ptr;
3240 struct btrfs_file_extent_item *ei;
3241 struct extent_buffer *leaf;
3242 unsigned long nr = 0;
3243
3244 name_len = strlen(symname) + 1;
3245 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
3246 return -ENAMETOOLONG;
3247
3248 mutex_lock(&root->fs_info->fs_mutex);
3249 err = btrfs_check_free_space(root, 1, 0);
3250 if (err)
3251 goto out_fail;
3252
3253 trans = btrfs_start_transaction(root, 1);
3254 btrfs_set_trans_block_group(trans, dir);
3255
3256 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3257 if (err) {
3258 err = -ENOSPC;
3259 goto out_unlock;
3260 }
3261
3262 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
3263 dentry->d_name.len,
3264 dentry->d_parent->d_inode->i_ino, objectid,
3265 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO);
3266 err = PTR_ERR(inode);
3267 if (IS_ERR(inode))
3268 goto out_unlock;
3269
3270 btrfs_set_trans_block_group(trans, inode);
3271 err = btrfs_add_nondir(trans, dentry, inode, 0);
3272 if (err)
3273 drop_inode = 1;
3274 else {
3275 inode->i_mapping->a_ops = &btrfs_aops;
3276 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3277 inode->i_fop = &btrfs_file_operations;
3278 inode->i_op = &btrfs_file_inode_operations;
3279 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3280 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3281 inode->i_mapping, GFP_NOFS);
3282 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3283 inode->i_mapping, GFP_NOFS);
3284 BTRFS_I(inode)->delalloc_bytes = 0;
3285 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3286 }
3287 dir->i_sb->s_dirt = 1;
3288 btrfs_update_inode_block_group(trans, inode);
3289 btrfs_update_inode_block_group(trans, dir);
3290 if (drop_inode)
3291 goto out_unlock;
3292
3293 path = btrfs_alloc_path();
3294 BUG_ON(!path);
3295 key.objectid = inode->i_ino;
3296 key.offset = 0;
3297 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
3298 datasize = btrfs_file_extent_calc_inline_size(name_len);
3299 err = btrfs_insert_empty_item(trans, root, path, &key,
3300 datasize);
3301 if (err) {
3302 drop_inode = 1;
3303 goto out_unlock;
3304 }
3305 leaf = path->nodes[0];
3306 ei = btrfs_item_ptr(leaf, path->slots[0],
3307 struct btrfs_file_extent_item);
3308 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
3309 btrfs_set_file_extent_type(leaf, ei,
3310 BTRFS_FILE_EXTENT_INLINE);
3311 ptr = btrfs_file_extent_inline_start(ei);
3312 write_extent_buffer(leaf, symname, ptr, name_len);
3313 btrfs_mark_buffer_dirty(leaf);
3314 btrfs_free_path(path);
3315
3316 inode->i_op = &btrfs_symlink_inode_operations;
3317 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3318 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3319 inode->i_size = name_len - 1;
3320 err = btrfs_update_inode(trans, root, inode);
3321 if (err)
3322 drop_inode = 1;
3323
3324 out_unlock:
3325 nr = trans->blocks_used;
3326 btrfs_end_transaction(trans, root);
3327 out_fail:
3328 mutex_unlock(&root->fs_info->fs_mutex);
3329 if (drop_inode) {
3330 inode_dec_link_count(inode);
3331 iput(inode);
3332 }
3333 btrfs_btree_balance_dirty(root, nr);
3334 btrfs_throttle(root);
3335 return err;
3336 }
3337
3338 static int btrfs_permission(struct inode *inode, int mask,
3339 struct nameidata *nd)
3340 {
3341 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
3342 return -EACCES;
3343 return generic_permission(inode, mask, NULL);
3344 }
3345
3346 static struct inode_operations btrfs_dir_inode_operations = {
3347 .lookup = btrfs_lookup,
3348 .create = btrfs_create,
3349 .unlink = btrfs_unlink,
3350 .link = btrfs_link,
3351 .mkdir = btrfs_mkdir,
3352 .rmdir = btrfs_rmdir,
3353 .rename = btrfs_rename,
3354 .symlink = btrfs_symlink,
3355 .setattr = btrfs_setattr,
3356 .mknod = btrfs_mknod,
3357 .setxattr = generic_setxattr,
3358 .getxattr = generic_getxattr,
3359 .listxattr = btrfs_listxattr,
3360 .removexattr = generic_removexattr,
3361 .permission = btrfs_permission,
3362 };
3363 static struct inode_operations btrfs_dir_ro_inode_operations = {
3364 .lookup = btrfs_lookup,
3365 .permission = btrfs_permission,
3366 };
3367 static struct file_operations btrfs_dir_file_operations = {
3368 .llseek = generic_file_llseek,
3369 .read = generic_read_dir,
3370 .readdir = btrfs_readdir,
3371 .unlocked_ioctl = btrfs_ioctl,
3372 #ifdef CONFIG_COMPAT
3373 .compat_ioctl = btrfs_ioctl,
3374 #endif
3375 };
3376
3377 static struct extent_io_ops btrfs_extent_io_ops = {
3378 .fill_delalloc = run_delalloc_range,
3379 .submit_bio_hook = btrfs_submit_bio_hook,
3380 .merge_bio_hook = btrfs_merge_bio_hook,
3381 .readpage_io_hook = btrfs_readpage_io_hook,
3382 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
3383 .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
3384 .set_bit_hook = btrfs_set_bit_hook,
3385 .clear_bit_hook = btrfs_clear_bit_hook,
3386 };
3387
3388 static struct address_space_operations btrfs_aops = {
3389 .readpage = btrfs_readpage,
3390 .writepage = btrfs_writepage,
3391 .writepages = btrfs_writepages,
3392 .readpages = btrfs_readpages,
3393 .sync_page = block_sync_page,
3394 .bmap = btrfs_bmap,
3395 .direct_IO = btrfs_direct_IO,
3396 .invalidatepage = btrfs_invalidatepage,
3397 .releasepage = btrfs_releasepage,
3398 .set_page_dirty = __set_page_dirty_nobuffers,
3399 };
3400
3401 static struct address_space_operations btrfs_symlink_aops = {
3402 .readpage = btrfs_readpage,
3403 .writepage = btrfs_writepage,
3404 .invalidatepage = btrfs_invalidatepage,
3405 .releasepage = btrfs_releasepage,
3406 };
3407
3408 static struct inode_operations btrfs_file_inode_operations = {
3409 .truncate = btrfs_truncate,
3410 .getattr = btrfs_getattr,
3411 .setattr = btrfs_setattr,
3412 .setxattr = generic_setxattr,
3413 .getxattr = generic_getxattr,
3414 .listxattr = btrfs_listxattr,
3415 .removexattr = generic_removexattr,
3416 .permission = btrfs_permission,
3417 };
3418 static struct inode_operations btrfs_special_inode_operations = {
3419 .getattr = btrfs_getattr,
3420 .setattr = btrfs_setattr,
3421 .permission = btrfs_permission,
3422 };
3423 static struct inode_operations btrfs_symlink_inode_operations = {
3424 .readlink = generic_readlink,
3425 .follow_link = page_follow_link_light,
3426 .put_link = page_put_link,
3427 .permission = btrfs_permission,
3428 };