Btrfs: Add support for online device removal
fs/btrfs/inode.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/version.h>
38 #include <linux/xattr.h>
39 #include "ctree.h"
40 #include "disk-io.h"
41 #include "transaction.h"
42 #include "btrfs_inode.h"
43 #include "ioctl.h"
44 #include "print-tree.h"
45 #include "volumes.h"
46
47 struct btrfs_iget_args {
48 u64 ino;
49 struct btrfs_root *root;
50 };
51
52 static struct inode_operations btrfs_dir_inode_operations;
53 static struct inode_operations btrfs_symlink_inode_operations;
54 static struct inode_operations btrfs_dir_ro_inode_operations;
55 static struct inode_operations btrfs_special_inode_operations;
56 static struct inode_operations btrfs_file_inode_operations;
57 static struct address_space_operations btrfs_aops;
58 static struct address_space_operations btrfs_symlink_aops;
59 static struct file_operations btrfs_dir_file_operations;
60 static struct extent_io_ops btrfs_extent_io_ops;
61
62 static struct kmem_cache *btrfs_inode_cachep;
63 struct kmem_cache *btrfs_trans_handle_cachep;
64 struct kmem_cache *btrfs_transaction_cachep;
65 struct kmem_cache *btrfs_bit_radix_cachep;
66 struct kmem_cache *btrfs_path_cachep;
67
68 #define S_SHIFT 12
69 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
70 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
71 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
72 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
73 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
74 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
75 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
76 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
77 };
78
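/*
 * rough free space check done before expensive operations: fail with
 * -ENOSPC once bytes used plus outstanding delalloc plus the requested
 * amount crosses 85% of the FS (90% for deletes, so removals can still
 * make progress on a nearly full filesystem).
 */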
79 int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
80 int for_del)
81 {
82 u64 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
83 u64 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
84 u64 thresh;
85 unsigned long flags;
86 int ret = 0;
87
88 if (for_del)
89 thresh = total * 90;
90 else
91 thresh = total * 85;
92
93 do_div(thresh, 100);
94
95 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
96 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
97 ret = -ENOSPC;
98 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
99 return ret;
100 }
101
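/*
 * cow path for delalloc: drop any existing extent pointers in
 * [start, end], allocate fresh data extents (at most max_extent bytes
 * per loop) and insert file extent items pointing at the new space.
 */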
102 static int cow_file_range(struct inode *inode, u64 start, u64 end)
103 {
104 struct btrfs_root *root = BTRFS_I(inode)->root;
105 struct btrfs_trans_handle *trans;
106 u64 alloc_hint = 0;
107 u64 num_bytes;
108 u64 cur_alloc_size;
109 u64 blocksize = root->sectorsize;
110 u64 orig_start = start;
111 u64 orig_num_bytes;
112 struct btrfs_key ins;
113 int ret;
114
115 trans = btrfs_start_transaction(root, 1);
116 BUG_ON(!trans);
117 btrfs_set_trans_block_group(trans, inode);
118
119 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
120 num_bytes = max(blocksize, num_bytes);
121 ret = btrfs_drop_extents(trans, root, inode,
122 start, start + num_bytes, start, &alloc_hint);
123 orig_num_bytes = num_bytes;
124
125 if (alloc_hint == EXTENT_MAP_INLINE)
126 goto out;
127
128 BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy));
129
130 while(num_bytes > 0) {
131 cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
132 ret = btrfs_alloc_extent(trans, root, cur_alloc_size,
133 root->sectorsize,
134 root->root_key.objectid,
135 trans->transid,
136 inode->i_ino, start, 0,
137 alloc_hint, (u64)-1, &ins, 1);
138 if (ret) {
139 WARN_ON(1);
140 goto out;
141 }
142 cur_alloc_size = ins.offset;
143 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
144 start, ins.objectid, ins.offset,
145 ins.offset, 0);
146 inode->i_blocks += ins.offset >> 9;
147 btrfs_check_file(root, inode);
148 if (num_bytes < cur_alloc_size) {
149 printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes,
150 cur_alloc_size);
151 break;
152 }
153 num_bytes -= cur_alloc_size;
154 alloc_hint = ins.objectid + ins.offset;
155 start += cur_alloc_size;
156 }
157 btrfs_drop_extent_cache(inode, orig_start,
158 orig_start + orig_num_bytes - 1);
159 btrfs_add_ordered_inode(inode);
160 btrfs_update_inode(trans, root, inode);
161 out:
162 btrfs_end_transaction(trans, root);
163 return ret;
164 }
165
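/*
 * nocow path for delalloc: walk the existing file extents in
 * [start, end] and leave regions that can safely be rewritten in place
 * alone, falling back to cow_file_range() for holes, inline extents,
 * shared (snapshotted) extents and anything outside the current FS size.
 */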
166 static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end)
167 {
168 u64 extent_start;
169 u64 extent_end;
170 u64 bytenr;
171 u64 cow_end;
172 u64 loops = 0;
173 u64 total_fs_bytes;
174 struct btrfs_root *root = BTRFS_I(inode)->root;
175 struct extent_buffer *leaf;
176 int found_type;
177 struct btrfs_path *path;
178 struct btrfs_file_extent_item *item;
179 int ret;
180 int err;
181 struct btrfs_key found_key;
182
183 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
184 path = btrfs_alloc_path();
185 BUG_ON(!path);
186 again:
187 ret = btrfs_lookup_file_extent(NULL, root, path,
188 inode->i_ino, start, 0);
189 if (ret < 0) {
190 btrfs_free_path(path);
191 return ret;
192 }
193
194 cow_end = end;
195 if (ret != 0) {
196 if (path->slots[0] == 0)
197 goto not_found;
198 path->slots[0]--;
199 }
200
201 leaf = path->nodes[0];
202 item = btrfs_item_ptr(leaf, path->slots[0],
203 struct btrfs_file_extent_item);
204
205 /* are we inside the extent that was found? */
206 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
207 found_type = btrfs_key_type(&found_key);
208 if (found_key.objectid != inode->i_ino ||
209 found_type != BTRFS_EXTENT_DATA_KEY) {
210 goto not_found;
211 }
212
213 found_type = btrfs_file_extent_type(leaf, item);
214 extent_start = found_key.offset;
215 if (found_type == BTRFS_FILE_EXTENT_REG) {
216 u64 extent_num_bytes;
217
218 extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
219 extent_end = extent_start + extent_num_bytes;
220 err = 0;
221
222 if (loops && start != extent_start)
223 goto not_found;
224
225 if (start < extent_start || start >= extent_end)
226 goto not_found;
227
228 cow_end = min(end, extent_end - 1);
229 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
230 if (bytenr == 0)
231 goto not_found;
232
233 /*
234 * we may be called by the resizer, make sure we're inside
235 * the limits of the FS
236 */
237 if (bytenr + extent_num_bytes > total_fs_bytes)
238 goto not_found;
239
240 if (btrfs_count_snapshots_in_path(root, path, bytenr) != 1) {
241 goto not_found;
242 }
243
244 start = extent_end;
245 } else {
246 goto not_found;
247 }
248 loop:
249 if (start > end) {
250 btrfs_free_path(path);
251 return 0;
252 }
253 btrfs_release_path(root, path);
254 loops++;
255 goto again;
256
257 not_found:
258 cow_file_range(inode, start, cow_end);
259 start = cow_end + 1;
260 goto loop;
261 }
262
263 static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
264 {
265 struct btrfs_root *root = BTRFS_I(inode)->root;
266 int ret;
267 mutex_lock(&root->fs_info->fs_mutex);
268 if (btrfs_test_opt(root, NODATACOW) ||
269 btrfs_test_flag(inode, NODATACOW))
270 ret = run_delalloc_nocow(inode, start, end);
271 else
272 ret = cow_file_range(inode, start, end);
273
274 mutex_unlock(&root->fs_info->fs_mutex);
275 return ret;
276 }
277
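/*
 * extent_io tree hooks: keep the per-inode and per-fs counts of
 * outstanding delalloc bytes in sync as EXTENT_DELALLOC bits are set
 * and cleared.
 */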
278 int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
279 unsigned long old, unsigned long bits)
280 {
281 unsigned long flags;
282 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
283 struct btrfs_root *root = BTRFS_I(inode)->root;
284 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
285 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
286 root->fs_info->delalloc_bytes += end - start + 1;
287 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
288 }
289 return 0;
290 }
291
292 int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
293 unsigned long old, unsigned long bits)
294 {
295 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
296 struct btrfs_root *root = BTRFS_I(inode)->root;
297 unsigned long flags;
298
299 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
300 if (end - start + 1 > root->fs_info->delalloc_bytes) {
301 printk("warning: delalloc account %Lu %Lu\n",
302 end - start + 1, root->fs_info->delalloc_bytes);
303 root->fs_info->delalloc_bytes = 0;
304 BTRFS_I(inode)->delalloc_bytes = 0;
305 } else {
306 root->fs_info->delalloc_bytes -= end - start + 1;
307 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
308 }
309 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
310 }
311 return 0;
312 }
313
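/*
 * decide if a page can be added to an existing bio: return 1 (don't
 * merge) when the extra bytes would push the bio past what
 * btrfs_map_block() can map contiguously.
 */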
314 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
315 size_t size, struct bio *bio)
316 {
317 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
318 struct btrfs_mapping_tree *map_tree;
319 u64 logical = bio->bi_sector << 9;
320 u64 length = 0;
321 u64 map_length;
322 int ret;
323
324 length = bio->bi_size;
325 map_tree = &root->fs_info->mapping_tree;
326 map_length = length;
327 ret = btrfs_map_block(map_tree, READ, logical,
328 &map_length, NULL, 0);
329
330 if (map_length < length + size) {
331 return 1;
332 }
333 return 0;
334 }
335
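/*
 * worker callback for checksummed writes: csum the bio, record the
 * csums in the tree inside a transaction, then map and submit the bio.
 */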
336 int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
337 int mirror_num)
338 {
339 struct btrfs_root *root = BTRFS_I(inode)->root;
340 struct btrfs_trans_handle *trans;
341 int ret = 0;
342 char *sums = NULL;
343
344 ret = btrfs_csum_one_bio(root, bio, &sums);
345 BUG_ON(ret);
346
347 mutex_lock(&root->fs_info->fs_mutex);
348 trans = btrfs_start_transaction(root, 1);
349
350 btrfs_set_trans_block_group(trans, inode);
351 btrfs_csum_file_blocks(trans, root, inode, bio, sums);
352
353 ret = btrfs_end_transaction(trans, root);
354 BUG_ON(ret);
355 mutex_unlock(&root->fs_info->fs_mutex);
356
357 kfree(sums);
358
359 return btrfs_map_bio(root, rw, bio, mirror_num);
360 }
361
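/*
 * bio submission hook: reads get an end_io workqueue helper, nodatasum
 * writes go straight to the devices, and checksummed writes are handed
 * off to the worker threads via btrfs_wq_submit_bio().
 */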
362 int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
363 int mirror_num)
364 {
365 struct btrfs_root *root = BTRFS_I(inode)->root;
366 int ret = 0;
367
368 if (!(rw & (1 << BIO_RW))) {
369 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
370 BUG_ON(ret);
371 goto mapit;
372 }
373
374 if (btrfs_test_opt(root, NODATASUM) ||
375 btrfs_test_flag(inode, NODATASUM)) {
376 goto mapit;
377 }
378
379 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
380 inode, rw, bio, mirror_num,
381 __btrfs_submit_bio_hook);
382 mapit:
383 return btrfs_map_bio(root, rw, bio, mirror_num);
384 }
385
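/*
 * readpage submit hook: look up the csum for this file offset and stash
 * it as state private in the io_tree so the end_io hook can verify the
 * data once the read completes.
 */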
386 int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
387 {
388 int ret = 0;
389 struct inode *inode = page->mapping->host;
390 struct btrfs_root *root = BTRFS_I(inode)->root;
391 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
392 struct btrfs_csum_item *item;
393 struct btrfs_path *path = NULL;
394 u32 csum;
395
396 if (btrfs_test_opt(root, NODATASUM) ||
397 btrfs_test_flag(inode, NODATASUM))
398 return 0;
399
400 mutex_lock(&root->fs_info->fs_mutex);
401 path = btrfs_alloc_path();
402 item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
403 if (IS_ERR(item)) {
404 ret = PTR_ERR(item);
405 /* a csum that isn't present is a preallocated region. */
406 if (ret == -ENOENT || ret == -EFBIG)
407 ret = 0;
408 csum = 0;
409 printk("no csum found for inode %lu start %Lu\n", inode->i_ino, start);
410 goto out;
411 }
412 read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
413 BTRFS_CRC32_SIZE);
414 set_state_private(io_tree, start, csum);
415 out:
416 if (path)
417 btrfs_free_path(path);
418 mutex_unlock(&root->fs_info->fs_mutex);
419 return ret;
420 }
421
422 struct io_failure_record {
423 struct page *page;
424 u64 start;
425 u64 len;
426 u64 logical;
427 int last_mirror;
428 };
429
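/*
 * called when a read fails verification or IO: remember the failed
 * range in the per-inode io_failure_tree and resubmit the read against
 * the next mirror until every copy has been tried.
 */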
430 int btrfs_readpage_io_failed_hook(struct bio *failed_bio,
431 struct page *page, u64 start, u64 end,
432 struct extent_state *state)
433 {
434 struct io_failure_record *failrec = NULL;
435 u64 private;
436 struct extent_map *em;
437 struct inode *inode = page->mapping->host;
438 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
439 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
440 struct bio *bio;
441 int num_copies;
442 int ret;
443 u64 logical;
444
445 ret = get_state_private(failure_tree, start, &private);
446 if (ret) {
447 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
448 if (!failrec)
449 return -ENOMEM;
450 failrec->start = start;
451 failrec->len = end - start + 1;
452 failrec->last_mirror = 0;
453
454 spin_lock(&em_tree->lock);
455 em = lookup_extent_mapping(em_tree, start, failrec->len);
456 if (em && (em->start > start || em->start + em->len < start)) {
457 free_extent_map(em);
458 em = NULL;
459 }
460 spin_unlock(&em_tree->lock);
461
462 if (!em || IS_ERR(em)) {
463 kfree(failrec);
464 return -EIO;
465 }
466 logical = start - em->start;
467 logical = em->block_start + logical;
468 failrec->logical = logical;
469 free_extent_map(em);
470 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
471 EXTENT_DIRTY, GFP_NOFS);
472 set_state_private(failure_tree, start,
473 (u64)(unsigned long)failrec);
474 } else {
475 failrec = (struct io_failure_record *)(unsigned long)private;
476 }
477 num_copies = btrfs_num_copies(
478 &BTRFS_I(inode)->root->fs_info->mapping_tree,
479 failrec->logical, failrec->len);
480 failrec->last_mirror++;
481 if (!state) {
482 spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
483 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
484 failrec->start,
485 EXTENT_LOCKED);
486 if (state && state->start != failrec->start)
487 state = NULL;
488 spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
489 }
490 if (!state || failrec->last_mirror > num_copies) {
491 set_state_private(failure_tree, failrec->start, 0);
492 clear_extent_bits(failure_tree, failrec->start,
493 failrec->start + failrec->len - 1,
494 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
495 kfree(failrec);
496 return -EIO;
497 }
498 bio = bio_alloc(GFP_NOFS, 1);
499 bio->bi_private = state;
500 bio->bi_end_io = failed_bio->bi_end_io;
501 bio->bi_sector = failrec->logical >> 9;
502 bio->bi_bdev = failed_bio->bi_bdev;
503 bio->bi_size = 0;
504 bio_add_page(bio, page, failrec->len, start - page_offset(page));
505 btrfs_submit_bio_hook(inode, READ, bio, failrec->last_mirror);
506 return 0;
507 }
508
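/*
 * end_io hook for data reads: recompute the csum of the page contents,
 * compare it with the value stashed in the io_tree, and clear any
 * matching entry from the io failure tree once a retry succeeds.  On a
 * mismatch the bad bytes are poisoned and -EIO is returned.
 */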
509 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
510 struct extent_state *state)
511 {
512 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
513 struct inode *inode = page->mapping->host;
514 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
515 char *kaddr;
516 u64 private = ~(u32)0;
517 int ret;
518 struct btrfs_root *root = BTRFS_I(inode)->root;
519 u32 csum = ~(u32)0;
520 unsigned long flags;
521
522 if (btrfs_test_opt(root, NODATASUM) ||
523 btrfs_test_flag(inode, NODATASUM))
524 return 0;
525 if (state && state->start == start) {
526 private = state->private;
527 ret = 0;
528 } else {
529 ret = get_state_private(io_tree, start, &private);
530 }
531 local_irq_save(flags);
532 kaddr = kmap_atomic(page, KM_IRQ0);
533 if (ret) {
534 goto zeroit;
535 }
536 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
537 btrfs_csum_final(csum, (char *)&csum);
538 if (csum != private) {
539 goto zeroit;
540 }
541 kunmap_atomic(kaddr, KM_IRQ0);
542 local_irq_restore(flags);
543
544 /* if the io failure tree for this inode is non-empty,
545 * check to see if we've recovered from a failed IO
546 */
547 private = 0;
548 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
549 (u64)-1, 1, EXTENT_DIRTY)) {
550 u64 private_failure;
551 struct io_failure_record *failure;
552 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
553 start, &private_failure);
554 if (ret == 0) {
555 failure = (struct io_failure_record *)(unsigned long)
556 private_failure;
557 set_state_private(&BTRFS_I(inode)->io_failure_tree,
558 failure->start, 0);
559 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
560 failure->start,
561 failure->start + failure->len - 1,
562 EXTENT_DIRTY | EXTENT_LOCKED,
563 GFP_NOFS);
564 kfree(failure);
565 }
566 }
567 return 0;
568
569 zeroit:
570 printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
571 page->mapping->host->i_ino, (unsigned long long)start, csum,
572 private);
573 memset(kaddr + offset, 1, end - start + 1);
574 flush_dcache_page(page);
575 kunmap_atomic(kaddr, KM_IRQ0);
576 local_irq_restore(flags);
577 if (private == 0)
578 return 0;
579 return -EIO;
580 }
581
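/*
 * read an inode item from the tree and fill in the VFS inode, choosing
 * the address space and inode operations based on the file type.
 */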
582 void btrfs_read_locked_inode(struct inode *inode)
583 {
584 struct btrfs_path *path;
585 struct extent_buffer *leaf;
586 struct btrfs_inode_item *inode_item;
587 struct btrfs_timespec *tspec;
588 struct btrfs_root *root = BTRFS_I(inode)->root;
589 struct btrfs_key location;
590 u64 alloc_group_block;
591 u32 rdev;
592 int ret;
593
594 path = btrfs_alloc_path();
595 BUG_ON(!path);
596 mutex_lock(&root->fs_info->fs_mutex);
597 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
598
599 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
600 if (ret)
601 goto make_bad;
602
603 leaf = path->nodes[0];
604 inode_item = btrfs_item_ptr(leaf, path->slots[0],
605 struct btrfs_inode_item);
606
607 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
608 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
609 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
610 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
611 inode->i_size = btrfs_inode_size(leaf, inode_item);
612
613 tspec = btrfs_inode_atime(inode_item);
614 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
615 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
616
617 tspec = btrfs_inode_mtime(inode_item);
618 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
619 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
620
621 tspec = btrfs_inode_ctime(inode_item);
622 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
623 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
624
625 inode->i_blocks = btrfs_inode_nblocks(leaf, inode_item);
626 inode->i_generation = btrfs_inode_generation(leaf, inode_item);
627 inode->i_rdev = 0;
628 rdev = btrfs_inode_rdev(leaf, inode_item);
629
630 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
631 BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
632 alloc_group_block);
633 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
634 if (!BTRFS_I(inode)->block_group) {
635 BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
636 NULL, 0,
637 BTRFS_BLOCK_GROUP_METADATA, 0);
638 }
639 btrfs_free_path(path);
640 inode_item = NULL;
641
642 mutex_unlock(&root->fs_info->fs_mutex);
643
644 switch (inode->i_mode & S_IFMT) {
645 case S_IFREG:
646 inode->i_mapping->a_ops = &btrfs_aops;
647 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
648 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
649 inode->i_fop = &btrfs_file_operations;
650 inode->i_op = &btrfs_file_inode_operations;
651 break;
652 case S_IFDIR:
653 inode->i_fop = &btrfs_dir_file_operations;
654 if (root == root->fs_info->tree_root)
655 inode->i_op = &btrfs_dir_ro_inode_operations;
656 else
657 inode->i_op = &btrfs_dir_inode_operations;
658 break;
659 case S_IFLNK:
660 inode->i_op = &btrfs_symlink_inode_operations;
661 inode->i_mapping->a_ops = &btrfs_symlink_aops;
662 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
663 break;
664 default:
665 init_special_inode(inode, inode->i_mode, rdev);
666 break;
667 }
668 return;
669
670 make_bad:
671 btrfs_release_path(root, path);
672 btrfs_free_path(path);
673 mutex_unlock(&root->fs_info->fs_mutex);
674 make_bad_inode(inode);
675 }
676
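/* copy the in-memory VFS inode fields into an on-disk btrfs_inode_item */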
677 static void fill_inode_item(struct extent_buffer *leaf,
678 struct btrfs_inode_item *item,
679 struct inode *inode)
680 {
681 btrfs_set_inode_uid(leaf, item, inode->i_uid);
682 btrfs_set_inode_gid(leaf, item, inode->i_gid);
683 btrfs_set_inode_size(leaf, item, inode->i_size);
684 btrfs_set_inode_mode(leaf, item, inode->i_mode);
685 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
686
687 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
688 inode->i_atime.tv_sec);
689 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
690 inode->i_atime.tv_nsec);
691
692 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
693 inode->i_mtime.tv_sec);
694 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
695 inode->i_mtime.tv_nsec);
696
697 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
698 inode->i_ctime.tv_sec);
699 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
700 inode->i_ctime.tv_nsec);
701
702 btrfs_set_inode_nblocks(leaf, item, inode->i_blocks);
703 btrfs_set_inode_generation(leaf, item, inode->i_generation);
704 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
705 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
706 btrfs_set_inode_block_group(leaf, item,
707 BTRFS_I(inode)->block_group->key.objectid);
708 }
709
710 int btrfs_update_inode(struct btrfs_trans_handle *trans,
711 struct btrfs_root *root,
712 struct inode *inode)
713 {
714 struct btrfs_inode_item *inode_item;
715 struct btrfs_path *path;
716 struct extent_buffer *leaf;
717 int ret;
718
719 path = btrfs_alloc_path();
720 BUG_ON(!path);
721 ret = btrfs_lookup_inode(trans, root, path,
722 &BTRFS_I(inode)->location, 1);
723 if (ret) {
724 if (ret > 0)
725 ret = -ENOENT;
726 goto failed;
727 }
728
729 leaf = path->nodes[0];
730 inode_item = btrfs_item_ptr(leaf, path->slots[0],
731 struct btrfs_inode_item);
732
733 fill_inode_item(leaf, inode_item, inode);
734 btrfs_mark_buffer_dirty(leaf);
735 btrfs_set_inode_last_trans(trans, inode);
736 ret = 0;
737 failed:
738 btrfs_release_path(root, path);
739 btrfs_free_path(path);
740 return ret;
741 }
742
743
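/*
 * remove one name from a directory: delete the dir item, the matching
 * dir index item and the inode backref, then update the size, times and
 * link counts on the directory and the victim inode.
 */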
744 static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
745 struct btrfs_root *root,
746 struct inode *dir,
747 struct dentry *dentry)
748 {
749 struct btrfs_path *path;
750 const char *name = dentry->d_name.name;
751 int name_len = dentry->d_name.len;
752 int ret = 0;
753 struct extent_buffer *leaf;
754 struct btrfs_dir_item *di;
755 struct btrfs_key key;
756
757 path = btrfs_alloc_path();
758 if (!path) {
759 ret = -ENOMEM;
760 goto err;
761 }
762
763 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
764 name, name_len, -1);
765 if (IS_ERR(di)) {
766 ret = PTR_ERR(di);
767 goto err;
768 }
769 if (!di) {
770 ret = -ENOENT;
771 goto err;
772 }
773 leaf = path->nodes[0];
774 btrfs_dir_item_key_to_cpu(leaf, di, &key);
775 ret = btrfs_delete_one_dir_name(trans, root, path, di);
776 if (ret)
777 goto err;
778 btrfs_release_path(root, path);
779
780 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
781 key.objectid, name, name_len, -1);
782 if (IS_ERR(di)) {
783 ret = PTR_ERR(di);
784 goto err;
785 }
786 if (!di) {
787 ret = -ENOENT;
788 goto err;
789 }
790 ret = btrfs_delete_one_dir_name(trans, root, path, di);
791
792 dentry->d_inode->i_ctime = dir->i_ctime;
793 ret = btrfs_del_inode_ref(trans, root, name, name_len,
794 dentry->d_inode->i_ino,
795 dentry->d_parent->d_inode->i_ino);
796 if (ret) {
797 printk("failed to delete reference to %.*s, "
798 "inode %lu parent %lu\n", name_len, name,
799 dentry->d_inode->i_ino,
800 dentry->d_parent->d_inode->i_ino);
801 }
802 err:
803 btrfs_free_path(path);
804 if (!ret) {
805 dir->i_size -= name_len * 2;
806 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
807 btrfs_update_inode(trans, root, dir);
808 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
809 dentry->d_inode->i_nlink--;
810 #else
811 drop_nlink(dentry->d_inode);
812 #endif
813 ret = btrfs_update_inode(trans, root, dentry->d_inode);
814 dir->i_sb->s_dirt = 1;
815 }
816 return ret;
817 }
818
819 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
820 {
821 struct btrfs_root *root;
822 struct btrfs_trans_handle *trans;
823 struct inode *inode = dentry->d_inode;
824 int ret;
825 unsigned long nr = 0;
826
827 root = BTRFS_I(dir)->root;
828 mutex_lock(&root->fs_info->fs_mutex);
829
830 ret = btrfs_check_free_space(root, 1, 1);
831 if (ret)
832 goto fail;
833
834 trans = btrfs_start_transaction(root, 1);
835
836 btrfs_set_trans_block_group(trans, dir);
837 ret = btrfs_unlink_trans(trans, root, dir, dentry);
838 nr = trans->blocks_used;
839
840 if (inode->i_nlink == 0) {
841 int found;
842 /* if the inode isn't linked anywhere,
843 * we don't need to worry about
844 * data=ordered
845 */
846 found = btrfs_del_ordered_inode(inode);
847 if (found == 1) {
848 atomic_dec(&inode->i_count);
849 }
850 }
851
852 btrfs_end_transaction(trans, root);
853 fail:
854 mutex_unlock(&root->fs_info->fs_mutex);
855 btrfs_btree_balance_dirty(root, nr);
856 btrfs_throttle(root);
857 return ret;
858 }
859
860 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
861 {
862 struct inode *inode = dentry->d_inode;
863 int err = 0;
864 int ret;
865 struct btrfs_root *root = BTRFS_I(dir)->root;
866 struct btrfs_trans_handle *trans;
867 unsigned long nr = 0;
868
869 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
870 return -ENOTEMPTY;
871
872 mutex_lock(&root->fs_info->fs_mutex);
873 ret = btrfs_check_free_space(root, 1, 1);
874 if (ret)
875 goto fail;
876
877 trans = btrfs_start_transaction(root, 1);
878 btrfs_set_trans_block_group(trans, dir);
879
880 /* now the directory is empty */
881 err = btrfs_unlink_trans(trans, root, dir, dentry);
882 if (!err) {
883 inode->i_size = 0;
884 }
885
886 nr = trans->blocks_used;
887 ret = btrfs_end_transaction(trans, root);
888 fail:
889 mutex_unlock(&root->fs_info->fs_mutex);
890 btrfs_btree_balance_dirty(root, nr);
891 btrfs_throttle(root);
892
893 if (ret && !err)
894 err = ret;
895 return err;
896 }
897
898 /*
899 * this can truncate away extent items, csum items and directory items.
900 * It starts at a high offset and removes keys until it can't find
901 * any higher than i_size.
902 *
903 * csum items that cross the new i_size are truncated to the new size
904 * as well.
905 */
906 static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
907 struct btrfs_root *root,
908 struct inode *inode,
909 u32 min_type)
910 {
911 int ret;
912 struct btrfs_path *path;
913 struct btrfs_key key;
914 struct btrfs_key found_key;
915 u32 found_type;
916 struct extent_buffer *leaf;
917 struct btrfs_file_extent_item *fi;
918 u64 extent_start = 0;
919 u64 extent_num_bytes = 0;
920 u64 item_end = 0;
921 u64 root_gen = 0;
922 u64 root_owner = 0;
923 int found_extent;
924 int del_item;
925 int pending_del_nr = 0;
926 int pending_del_slot = 0;
927 int extent_type = -1;
928 u64 mask = root->sectorsize - 1;
929
930 btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1);
931 path = btrfs_alloc_path();
932 path->reada = -1;
933 BUG_ON(!path);
934
935 /* FIXME, add redo link to tree so we don't leak on crash */
936 key.objectid = inode->i_ino;
937 key.offset = (u64)-1;
938 key.type = (u8)-1;
939
940 btrfs_init_path(path);
941 search_again:
942 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
943 if (ret < 0) {
944 goto error;
945 }
946 if (ret > 0) {
947 BUG_ON(path->slots[0] == 0);
948 path->slots[0]--;
949 }
950
951 while(1) {
952 fi = NULL;
953 leaf = path->nodes[0];
954 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
955 found_type = btrfs_key_type(&found_key);
956
957 if (found_key.objectid != inode->i_ino)
958 break;
959
960 if (found_type < min_type)
961 break;
962
963 item_end = found_key.offset;
964 if (found_type == BTRFS_EXTENT_DATA_KEY) {
965 fi = btrfs_item_ptr(leaf, path->slots[0],
966 struct btrfs_file_extent_item);
967 extent_type = btrfs_file_extent_type(leaf, fi);
968 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
969 item_end +=
970 btrfs_file_extent_num_bytes(leaf, fi);
971 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
972 struct btrfs_item *item = btrfs_item_nr(leaf,
973 path->slots[0]);
974 item_end += btrfs_file_extent_inline_len(leaf,
975 item);
976 }
977 item_end--;
978 }
979 if (found_type == BTRFS_CSUM_ITEM_KEY) {
980 ret = btrfs_csum_truncate(trans, root, path,
981 inode->i_size);
982 BUG_ON(ret);
983 }
984 if (item_end < inode->i_size) {
985 if (found_type == BTRFS_DIR_ITEM_KEY) {
986 found_type = BTRFS_INODE_ITEM_KEY;
987 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
988 found_type = BTRFS_CSUM_ITEM_KEY;
989 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
990 found_type = BTRFS_XATTR_ITEM_KEY;
991 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
992 found_type = BTRFS_INODE_REF_KEY;
993 } else if (found_type) {
994 found_type--;
995 } else {
996 break;
997 }
998 btrfs_set_key_type(&key, found_type);
999 goto next;
1000 }
1001 if (found_key.offset >= inode->i_size)
1002 del_item = 1;
1003 else
1004 del_item = 0;
1005 found_extent = 0;
1006
1007 /* FIXME, shrink the extent if the ref count is only 1 */
1008 if (found_type != BTRFS_EXTENT_DATA_KEY)
1009 goto delete;
1010
1011 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
1012 u64 num_dec;
1013 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
1014 if (!del_item) {
1015 u64 orig_num_bytes =
1016 btrfs_file_extent_num_bytes(leaf, fi);
1017 extent_num_bytes = inode->i_size -
1018 found_key.offset + root->sectorsize - 1;
1019 extent_num_bytes = extent_num_bytes &
1020 ~((u64)root->sectorsize - 1);
1021 btrfs_set_file_extent_num_bytes(leaf, fi,
1022 extent_num_bytes);
1023 num_dec = (orig_num_bytes -
1024 extent_num_bytes);
1025 if (extent_start != 0)
1026 dec_i_blocks(inode, num_dec);
1027 btrfs_mark_buffer_dirty(leaf);
1028 } else {
1029 extent_num_bytes =
1030 btrfs_file_extent_disk_num_bytes(leaf,
1031 fi);
1032 /* FIXME blocksize != 4096 */
1033 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
1034 if (extent_start != 0) {
1035 found_extent = 1;
1036 dec_i_blocks(inode, num_dec);
1037 }
1038 root_gen = btrfs_header_generation(leaf);
1039 root_owner = btrfs_header_owner(leaf);
1040 }
1041 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1042 if (!del_item) {
1043 u32 newsize = inode->i_size - found_key.offset;
1044 dec_i_blocks(inode, item_end + 1 -
1045 found_key.offset - newsize);
1046 newsize =
1047 btrfs_file_extent_calc_inline_size(newsize);
1048 ret = btrfs_truncate_item(trans, root, path,
1049 newsize, 1);
1050 BUG_ON(ret);
1051 } else {
1052 dec_i_blocks(inode, item_end + 1 -
1053 found_key.offset);
1054 }
1055 }
1056 delete:
1057 if (del_item) {
1058 if (!pending_del_nr) {
1059 /* no pending yet, add ourselves */
1060 pending_del_slot = path->slots[0];
1061 pending_del_nr = 1;
1062 } else if (pending_del_nr &&
1063 path->slots[0] + 1 == pending_del_slot) {
1064 /* hop on the pending chunk */
1065 pending_del_nr++;
1066 pending_del_slot = path->slots[0];
1067 } else {
1068 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
1069 }
1070 } else {
1071 break;
1072 }
1073 if (found_extent) {
1074 ret = btrfs_free_extent(trans, root, extent_start,
1075 extent_num_bytes,
1076 root_owner,
1077 root_gen, inode->i_ino,
1078 found_key.offset, 0);
1079 BUG_ON(ret);
1080 }
1081 next:
1082 if (path->slots[0] == 0) {
1083 if (pending_del_nr)
1084 goto del_pending;
1085 btrfs_release_path(root, path);
1086 goto search_again;
1087 }
1088
1089 path->slots[0]--;
1090 if (pending_del_nr &&
1091 path->slots[0] + 1 != pending_del_slot) {
1092 struct btrfs_key debug;
1093 del_pending:
1094 btrfs_item_key_to_cpu(path->nodes[0], &debug,
1095 pending_del_slot);
1096 ret = btrfs_del_items(trans, root, path,
1097 pending_del_slot,
1098 pending_del_nr);
1099 BUG_ON(ret);
1100 pending_del_nr = 0;
1101 btrfs_release_path(root, path);
1102 goto search_again;
1103 }
1104 }
1105 ret = 0;
1106 error:
1107 if (pending_del_nr) {
1108 ret = btrfs_del_items(trans, root, path, pending_del_slot,
1109 pending_del_nr);
1110 }
1111 btrfs_release_path(root, path);
1112 btrfs_free_path(path);
1113 inode->i_sb->s_dirt = 1;
1114 return ret;
1115 }
1116
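/*
 * helper for truncate: mark a single locked page delalloc and zero it
 * from zero_start to the end of the page so stale data isn't left past
 * the new eof.
 */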
1117 static int btrfs_cow_one_page(struct inode *inode, struct page *page,
1118 size_t zero_start)
1119 {
1120 char *kaddr;
1121 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1122 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1123 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
1124 int ret = 0;
1125
1126 WARN_ON(!PageLocked(page));
1127 set_page_extent_mapped(page);
1128
1129 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
1130 set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
1131 page_end, GFP_NOFS);
1132
1133 if (zero_start != PAGE_CACHE_SIZE) {
1134 kaddr = kmap(page);
1135 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
1136 flush_dcache_page(page);
1137 kunmap(page);
1138 }
1139 set_page_dirty(page);
1140 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
1141
1142 return ret;
1143 }
1144
1145 /*
1146 * taken from block_truncate_page, but does cow as it zeros out
1147 * any bytes left in the last page in the file.
1148 */
1149 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
1150 {
1151 struct inode *inode = mapping->host;
1152 struct btrfs_root *root = BTRFS_I(inode)->root;
1153 u32 blocksize = root->sectorsize;
1154 pgoff_t index = from >> PAGE_CACHE_SHIFT;
1155 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1156 struct page *page;
1157 int ret = 0;
1158 u64 page_start;
1159
1160 if ((offset & (blocksize - 1)) == 0)
1161 goto out;
1162
1163 ret = -ENOMEM;
1164 page = grab_cache_page(mapping, index);
1165 if (!page)
1166 goto out;
1167 if (!PageUptodate(page)) {
1168 ret = btrfs_readpage(NULL, page);
1169 lock_page(page);
1170 if (!PageUptodate(page)) {
1171 ret = -EIO;
1172 goto out;
1173 }
1174 }
1175 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
1176
1177 ret = btrfs_cow_one_page(inode, page, offset);
1178
1179 unlock_page(page);
1180 page_cache_release(page);
1181 out:
1182 return ret;
1183 }
1184
1185 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
1186 {
1187 struct inode *inode = dentry->d_inode;
1188 int err;
1189
1190 err = inode_change_ok(inode, attr);
1191 if (err)
1192 return err;
1193
1194 if (S_ISREG(inode->i_mode) &&
1195 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
1196 struct btrfs_trans_handle *trans;
1197 struct btrfs_root *root = BTRFS_I(inode)->root;
1198 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1199
1200 u64 mask = root->sectorsize - 1;
1201 u64 hole_start = (inode->i_size + mask) & ~mask;
1202 u64 block_end = (attr->ia_size + mask) & ~mask;
1203 u64 hole_size;
1204 u64 alloc_hint = 0;
1205
1206 if (attr->ia_size <= hole_start)
1207 goto out;
1208
1209 mutex_lock(&root->fs_info->fs_mutex);
1210 err = btrfs_check_free_space(root, 1, 0);
1211 mutex_unlock(&root->fs_info->fs_mutex);
1212 if (err)
1213 goto fail;
1214
1215 btrfs_truncate_page(inode->i_mapping, inode->i_size);
1216
1217 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1218 hole_size = block_end - hole_start;
1219
1220 mutex_lock(&root->fs_info->fs_mutex);
1221 trans = btrfs_start_transaction(root, 1);
1222 btrfs_set_trans_block_group(trans, inode);
1223 err = btrfs_drop_extents(trans, root, inode,
1224 hole_start, block_end, hole_start,
1225 &alloc_hint);
1226
1227 if (alloc_hint != EXTENT_MAP_INLINE) {
1228 err = btrfs_insert_file_extent(trans, root,
1229 inode->i_ino,
1230 hole_start, 0, 0,
1231 hole_size, 0);
1232 btrfs_drop_extent_cache(inode, hole_start,
1233 (u64)-1);
1234 btrfs_check_file(root, inode);
1235 }
1236 btrfs_end_transaction(trans, root);
1237 mutex_unlock(&root->fs_info->fs_mutex);
1238 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1239 if (err)
1240 return err;
1241 }
1242 out:
1243 err = inode_setattr(inode, attr);
1244 fail:
1245 return err;
1246 }
1247
1248 void btrfs_put_inode(struct inode *inode)
1249 {
1250 int ret;
1251
1252 if (!BTRFS_I(inode)->ordered_trans) {
1253 return;
1254 }
1255
1256 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY) ||
1257 mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
1258 return;
1259
1260 ret = btrfs_del_ordered_inode(inode);
1261 if (ret == 1) {
1262 atomic_dec(&inode->i_count);
1263 }
1264 }
1265
1266 void btrfs_delete_inode(struct inode *inode)
1267 {
1268 struct btrfs_trans_handle *trans;
1269 struct btrfs_root *root = BTRFS_I(inode)->root;
1270 unsigned long nr;
1271 int ret;
1272
1273 truncate_inode_pages(&inode->i_data, 0);
1274 if (is_bad_inode(inode)) {
1275 goto no_delete;
1276 }
1277
1278 inode->i_size = 0;
1279 mutex_lock(&root->fs_info->fs_mutex);
1280 trans = btrfs_start_transaction(root, 1);
1281
1282 btrfs_set_trans_block_group(trans, inode);
1283 ret = btrfs_truncate_in_trans(trans, root, inode, 0);
1284 if (ret)
1285 goto no_delete_lock;
1286
1287 nr = trans->blocks_used;
1288 clear_inode(inode);
1289
1290 btrfs_end_transaction(trans, root);
1291 mutex_unlock(&root->fs_info->fs_mutex);
1292 btrfs_btree_balance_dirty(root, nr);
1293 btrfs_throttle(root);
1294 return;
1295
1296 no_delete_lock:
1297 nr = trans->blocks_used;
1298 btrfs_end_transaction(trans, root);
1299 mutex_unlock(&root->fs_info->fs_mutex);
1300 btrfs_btree_balance_dirty(root, nr);
1301 btrfs_throttle(root);
1302 no_delete:
1303 clear_inode(inode);
1304 }
1305
1306 /*
1307 * this returns the key found in the dir entry in the location pointer.
1308 * If no dir entries were found, location->objectid is 0.
1309 */
1310 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
1311 struct btrfs_key *location)
1312 {
1313 const char *name = dentry->d_name.name;
1314 int namelen = dentry->d_name.len;
1315 struct btrfs_dir_item *di;
1316 struct btrfs_path *path;
1317 struct btrfs_root *root = BTRFS_I(dir)->root;
1318 int ret = 0;
1319
1320 if (namelen == 1 && strcmp(name, ".") == 0) {
1321 location->objectid = dir->i_ino;
1322 location->type = BTRFS_INODE_ITEM_KEY;
1323 location->offset = 0;
1324 return 0;
1325 }
1326 path = btrfs_alloc_path();
1327 BUG_ON(!path);
1328
1329 if (namelen == 2 && strcmp(name, "..") == 0) {
1330 struct btrfs_key key;
1331 struct extent_buffer *leaf;
1332 u32 nritems;
1333 int slot;
1334
1335 key.objectid = dir->i_ino;
1336 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1337 key.offset = 0;
1338 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1339 BUG_ON(ret == 0);
1340 ret = 0;
1341
1342 leaf = path->nodes[0];
1343 slot = path->slots[0];
1344 nritems = btrfs_header_nritems(leaf);
1345 if (slot >= nritems)
1346 goto out_err;
1347
1348 btrfs_item_key_to_cpu(leaf, &key, slot);
1349 if (key.objectid != dir->i_ino ||
1350 key.type != BTRFS_INODE_REF_KEY) {
1351 goto out_err;
1352 }
1353 location->objectid = key.offset;
1354 location->type = BTRFS_INODE_ITEM_KEY;
1355 location->offset = 0;
1356 goto out;
1357 }
1358
1359 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
1360 namelen, 0);
1361 if (IS_ERR(di))
1362 ret = PTR_ERR(di);
1363 if (!di || IS_ERR(di)) {
1364 goto out_err;
1365 }
1366 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
1367 out:
1368 btrfs_free_path(path);
1369 return ret;
1370 out_err:
1371 location->objectid = 0;
1372 goto out;
1373 }
1374
1375 /*
1376 * when we hit a tree root in a directory, the btrfs part of the inode
1377 * needs to be changed to reflect the root directory of the tree root. This
1378 * is kind of like crossing a mount point.
1379 */
1380 static int fixup_tree_root_location(struct btrfs_root *root,
1381 struct btrfs_key *location,
1382 struct btrfs_root **sub_root,
1383 struct dentry *dentry)
1384 {
1385 struct btrfs_path *path;
1386 struct btrfs_root_item *ri;
1387
1388 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
1389 return 0;
1390 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1391 return 0;
1392
1393 path = btrfs_alloc_path();
1394 BUG_ON(!path);
1395 mutex_lock(&root->fs_info->fs_mutex);
1396
1397 *sub_root = btrfs_read_fs_root(root->fs_info, location,
1398 dentry->d_name.name,
1399 dentry->d_name.len);
1400 if (IS_ERR(*sub_root)) {
1401 btrfs_free_path(path);
mutex_unlock(&root->fs_info->fs_mutex);
return PTR_ERR(*sub_root);
}
1402
1403 ri = &(*sub_root)->root_item;
1404 location->objectid = btrfs_root_dirid(ri);
1405 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1406 location->offset = 0;
1407
1408 btrfs_free_path(path);
1409 mutex_unlock(&root->fs_info->fs_mutex);
1410 return 0;
1411 }
1412
1413 static int btrfs_init_locked_inode(struct inode *inode, void *p)
1414 {
1415 struct btrfs_iget_args *args = p;
1416 inode->i_ino = args->ino;
1417 BTRFS_I(inode)->root = args->root;
1418 BTRFS_I(inode)->delalloc_bytes = 0;
1419 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1420 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1421 inode->i_mapping, GFP_NOFS);
1422 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1423 inode->i_mapping, GFP_NOFS);
1424 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
1425 return 0;
1426 }
1427
1428 static int btrfs_find_actor(struct inode *inode, void *opaque)
1429 {
1430 struct btrfs_iget_args *args = opaque;
1431 return (args->ino == inode->i_ino &&
1432 args->root == BTRFS_I(inode)->root);
1433 }
1434
1435 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
1436 u64 root_objectid)
1437 {
1438 struct btrfs_iget_args args;
1439 args.ino = objectid;
1440 args.root = btrfs_lookup_fs_root(btrfs_sb(s)->fs_info, root_objectid);
1441
1442 if (!args.root)
1443 return NULL;
1444
1445 return ilookup5(s, objectid, btrfs_find_actor, (void *)&args);
1446 }
1447
1448 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
1449 struct btrfs_root *root)
1450 {
1451 struct inode *inode;
1452 struct btrfs_iget_args args;
1453 args.ino = objectid;
1454 args.root = root;
1455
1456 inode = iget5_locked(s, objectid, btrfs_find_actor,
1457 btrfs_init_locked_inode,
1458 (void *)&args);
1459 return inode;
1460 }
1461
1462 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
1463 struct nameidata *nd)
1464 {
1465 struct inode * inode;
1466 struct btrfs_inode *bi = BTRFS_I(dir);
1467 struct btrfs_root *root = bi->root;
1468 struct btrfs_root *sub_root = root;
1469 struct btrfs_key location;
1470 int ret;
1471
1472 if (dentry->d_name.len > BTRFS_NAME_LEN)
1473 return ERR_PTR(-ENAMETOOLONG);
1474
1475 mutex_lock(&root->fs_info->fs_mutex);
1476 ret = btrfs_inode_by_name(dir, dentry, &location);
1477 mutex_unlock(&root->fs_info->fs_mutex);
1478
1479 if (ret < 0)
1480 return ERR_PTR(ret);
1481
1482 inode = NULL;
1483 if (location.objectid) {
1484 ret = fixup_tree_root_location(root, &location, &sub_root,
1485 dentry);
1486 if (ret < 0)
1487 return ERR_PTR(ret);
1488 if (ret > 0)
1489 return ERR_PTR(-ENOENT);
1490 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
1491 sub_root);
1492 if (!inode)
1493 return ERR_PTR(-EACCES);
1494 if (inode->i_state & I_NEW) {
1495 /* the inode and parent dir are two different roots */
1496 if (sub_root != root) {
1497 igrab(inode);
1498 sub_root->inode = inode;
1499 }
1500 BTRFS_I(inode)->root = sub_root;
1501 memcpy(&BTRFS_I(inode)->location, &location,
1502 sizeof(location));
1503 btrfs_read_locked_inode(inode);
1504 unlock_new_inode(inode);
1505 }
1506 }
1507 return d_splice_alias(inode, dentry);
1508 }
1509
1510 static unsigned char btrfs_filetype_table[] = {
1511 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
1512 };
1513
1514 static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1515 {
1516 struct inode *inode = filp->f_dentry->d_inode;
1517 struct btrfs_root *root = BTRFS_I(inode)->root;
1518 struct btrfs_item *item;
1519 struct btrfs_dir_item *di;
1520 struct btrfs_key key;
1521 struct btrfs_key found_key;
1522 struct btrfs_path *path;
1523 int ret;
1524 u32 nritems;
1525 struct extent_buffer *leaf;
1526 int slot;
1527 int advance;
1528 unsigned char d_type;
1529 int over = 0;
1530 u32 di_cur;
1531 u32 di_total;
1532 u32 di_len;
1533 int key_type = BTRFS_DIR_INDEX_KEY;
1534 char tmp_name[32];
1535 char *name_ptr;
1536 int name_len;
1537
1538 /* FIXME, use a real flag for deciding about the key type */
1539 if (root->fs_info->tree_root == root)
1540 key_type = BTRFS_DIR_ITEM_KEY;
1541
1542 /* special case for "." */
1543 if (filp->f_pos == 0) {
1544 over = filldir(dirent, ".", 1,
1545 1, inode->i_ino,
1546 DT_DIR);
1547 if (over)
1548 return 0;
1549 filp->f_pos = 1;
1550 }
1551
1552 mutex_lock(&root->fs_info->fs_mutex);
1553 key.objectid = inode->i_ino;
1554 path = btrfs_alloc_path();
1555 path->reada = 2;
1556
1557 /* special case for .., just use the back ref */
1558 if (filp->f_pos == 1) {
1559 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1560 key.offset = 0;
1561 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1562 BUG_ON(ret == 0);
1563 leaf = path->nodes[0];
1564 slot = path->slots[0];
1565 nritems = btrfs_header_nritems(leaf);
1566 if (slot >= nritems) {
1567 btrfs_release_path(root, path);
1568 goto read_dir_items;
1569 }
1570 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1571 btrfs_release_path(root, path);
1572 if (found_key.objectid != key.objectid ||
1573 found_key.type != BTRFS_INODE_REF_KEY)
1574 goto read_dir_items;
1575 over = filldir(dirent, "..", 2,
1576 2, found_key.offset, DT_DIR);
1577 if (over)
1578 goto nopos;
1579 filp->f_pos = 2;
1580 }
1581
1582 read_dir_items:
1583 btrfs_set_key_type(&key, key_type);
1584 key.offset = filp->f_pos;
1585
1586 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1587 if (ret < 0)
1588 goto err;
1589 advance = 0;
1590 while(1) {
1591 leaf = path->nodes[0];
1592 nritems = btrfs_header_nritems(leaf);
1593 slot = path->slots[0];
1594 if (advance || slot >= nritems) {
1595 if (slot >= nritems - 1) {
1596 ret = btrfs_next_leaf(root, path);
1597 if (ret)
1598 break;
1599 leaf = path->nodes[0];
1600 nritems = btrfs_header_nritems(leaf);
1601 slot = path->slots[0];
1602 } else {
1603 slot++;
1604 path->slots[0]++;
1605 }
1606 }
1607 advance = 1;
1608 item = btrfs_item_nr(leaf, slot);
1609 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1610
1611 if (found_key.objectid != key.objectid)
1612 break;
1613 if (btrfs_key_type(&found_key) != key_type)
1614 break;
1615 if (found_key.offset < filp->f_pos)
1616 continue;
1617
1618 filp->f_pos = found_key.offset;
1619 advance = 1;
1620 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
1621 di_cur = 0;
1622 di_total = btrfs_item_size(leaf, item);
1623 while(di_cur < di_total) {
1624 struct btrfs_key location;
1625
1626 name_len = btrfs_dir_name_len(leaf, di);
1627 if (name_len < 32) {
1628 name_ptr = tmp_name;
1629 } else {
1630 name_ptr = kmalloc(name_len, GFP_NOFS);
1631 BUG_ON(!name_ptr);
1632 }
1633 read_extent_buffer(leaf, name_ptr,
1634 (unsigned long)(di + 1), name_len);
1635
1636 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
1637 btrfs_dir_item_key_to_cpu(leaf, di, &location);
1638 over = filldir(dirent, name_ptr, name_len,
1639 found_key.offset,
1640 location.objectid,
1641 d_type);
1642
1643 if (name_ptr != tmp_name)
1644 kfree(name_ptr);
1645
1646 if (over)
1647 goto nopos;
1648 di_len = btrfs_dir_name_len(leaf, di) +
1649 btrfs_dir_data_len(leaf, di) + sizeof(*di);
1650 di_cur += di_len;
1651 di = (struct btrfs_dir_item *)((char *)di + di_len);
1652 }
1653 }
1654 if (key_type == BTRFS_DIR_INDEX_KEY)
1655 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
1656 else
1657 filp->f_pos++;
1658 nopos:
1659 ret = 0;
1660 err:
1661 btrfs_release_path(root, path);
1662 btrfs_free_path(path);
1663 mutex_unlock(&root->fs_info->fs_mutex);
1664 return ret;
1665 }
1666
1667 int btrfs_write_inode(struct inode *inode, int wait)
1668 {
1669 struct btrfs_root *root = BTRFS_I(inode)->root;
1670 struct btrfs_trans_handle *trans;
1671 int ret = 0;
1672
1673 if (wait) {
1674 mutex_lock(&root->fs_info->fs_mutex);
1675 trans = btrfs_start_transaction(root, 1);
1676 btrfs_set_trans_block_group(trans, inode);
1677 ret = btrfs_commit_transaction(trans, root);
1678 mutex_unlock(&root->fs_info->fs_mutex);
1679 }
1680 return ret;
1681 }
1682
1683 /*
1684 * This is somewhat expensive, updating the tree every time the
1685 * inode changes. But, it is most likely to find the inode in cache.
1686 * FIXME, needs more benchmarking... there are no reasons other than performance
1687 * to keep or drop this code.
1688 */
1689 void btrfs_dirty_inode(struct inode *inode)
1690 {
1691 struct btrfs_root *root = BTRFS_I(inode)->root;
1692 struct btrfs_trans_handle *trans;
1693
1694 mutex_lock(&root->fs_info->fs_mutex);
1695 trans = btrfs_start_transaction(root, 1);
1696 btrfs_set_trans_block_group(trans, inode);
1697 btrfs_update_inode(trans, root, inode);
1698 btrfs_end_transaction(trans, root);
1699 mutex_unlock(&root->fs_info->fs_mutex);
1700 }
1701
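/*
 * create a new in-memory inode and insert both its inode item and its
 * first inode backref (the name in the ref_objectid directory) into the
 * tree with a single batched insert.
 */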
1702 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
1703 struct btrfs_root *root,
1704 const char *name, int name_len,
1705 u64 ref_objectid,
1706 u64 objectid,
1707 struct btrfs_block_group_cache *group,
1708 int mode)
1709 {
1710 struct inode *inode;
1711 struct btrfs_inode_item *inode_item;
1712 struct btrfs_block_group_cache *new_inode_group;
1713 struct btrfs_key *location;
1714 struct btrfs_path *path;
1715 struct btrfs_inode_ref *ref;
1716 struct btrfs_key key[2];
1717 u32 sizes[2];
1718 unsigned long ptr;
1719 int ret;
1720 int owner;
1721
1722 path = btrfs_alloc_path();
1723 BUG_ON(!path);
1724
1725 inode = new_inode(root->fs_info->sb);
1726 if (!inode)
1727 return ERR_PTR(-ENOMEM);
1728
1729 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1730 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1731 inode->i_mapping, GFP_NOFS);
1732 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1733 inode->i_mapping, GFP_NOFS);
1734 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
1735 BTRFS_I(inode)->delalloc_bytes = 0;
1736 BTRFS_I(inode)->root = root;
1737
1738 if (S_ISDIR(mode))
1739 owner = 0;
1740 else
1741 owner = 1;
1742 new_inode_group = btrfs_find_block_group(root, group, 0,
1743 BTRFS_BLOCK_GROUP_METADATA, owner);
1744 if (!new_inode_group) {
1745 printk("find_block group failed\n");
1746 new_inode_group = group;
1747 }
1748 BTRFS_I(inode)->block_group = new_inode_group;
1749 BTRFS_I(inode)->flags = 0;
1750
1751 key[0].objectid = objectid;
1752 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
1753 key[0].offset = 0;
1754
1755 key[1].objectid = objectid;
1756 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
1757 key[1].offset = ref_objectid;
1758
1759 sizes[0] = sizeof(struct btrfs_inode_item);
1760 sizes[1] = name_len + sizeof(*ref);
1761
1762 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
1763 if (ret != 0)
1764 goto fail;
1765
1766 if (objectid > root->highest_inode)
1767 root->highest_inode = objectid;
1768
1769 inode->i_uid = current->fsuid;
1770 inode->i_gid = current->fsgid;
1771 inode->i_mode = mode;
1772 inode->i_ino = objectid;
1773 inode->i_blocks = 0;
1774 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1775 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1776 struct btrfs_inode_item);
1777 fill_inode_item(path->nodes[0], inode_item, inode);
1778
1779 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
1780 struct btrfs_inode_ref);
1781 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
1782 ptr = (unsigned long)(ref + 1);
1783 write_extent_buffer(path->nodes[0], name, ptr, name_len);
1784
1785 btrfs_mark_buffer_dirty(path->nodes[0]);
1786 btrfs_free_path(path);
1787
1788 location = &BTRFS_I(inode)->location;
1789 location->objectid = objectid;
1790 location->offset = 0;
1791 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1792
1793 insert_inode_hash(inode);
1794 return inode;
1795 fail:
1796 btrfs_free_path(path);
1797 return ERR_PTR(ret);
1798 }
1799
1800 static inline u8 btrfs_inode_type(struct inode *inode)
1801 {
1802 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
1803 }
1804
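/*
 * link an inode into a directory: insert the dir item (and optionally a
 * backref from the inode), then bump the directory's size and times.
 */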
1805 static int btrfs_add_link(struct btrfs_trans_handle *trans,
1806 struct dentry *dentry, struct inode *inode,
1807 int add_backref)
1808 {
1809 int ret;
1810 struct btrfs_key key;
1811 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
1812 struct inode *parent_inode;
1813
1814 key.objectid = inode->i_ino;
1815 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1816 key.offset = 0;
1817
1818 ret = btrfs_insert_dir_item(trans, root,
1819 dentry->d_name.name, dentry->d_name.len,
1820 dentry->d_parent->d_inode->i_ino,
1821 &key, btrfs_inode_type(inode));
1822 if (ret == 0) {
1823 if (add_backref) {
1824 ret = btrfs_insert_inode_ref(trans, root,
1825 dentry->d_name.name,
1826 dentry->d_name.len,
1827 inode->i_ino,
1828 dentry->d_parent->d_inode->i_ino);
1829 }
1830 parent_inode = dentry->d_parent->d_inode;
1831 parent_inode->i_size += dentry->d_name.len * 2;
1832 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1833 ret = btrfs_update_inode(trans, root,
1834 dentry->d_parent->d_inode);
1835 }
1836 return ret;
1837 }
1838
1839 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
1840 struct dentry *dentry, struct inode *inode,
1841 int backref)
1842 {
1843 int err = btrfs_add_link(trans, dentry, inode, backref);
1844 if (!err) {
1845 d_instantiate(dentry, inode);
1846 return 0;
1847 }
1848 if (err > 0)
1849 err = -EEXIST;
1850 return err;
1851 }
1852
1853 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
1854 int mode, dev_t rdev)
1855 {
1856 struct btrfs_trans_handle *trans;
1857 struct btrfs_root *root = BTRFS_I(dir)->root;
1858 struct inode *inode = NULL;
1859 int err;
1860 int drop_inode = 0;
1861 u64 objectid;
1862 unsigned long nr = 0;
1863
1864 if (!new_valid_dev(rdev))
1865 return -EINVAL;
1866
1867 mutex_lock(&root->fs_info->fs_mutex);
1868 err = btrfs_check_free_space(root, 1, 0);
1869 if (err)
1870 goto fail;
1871
1872 trans = btrfs_start_transaction(root, 1);
1873 btrfs_set_trans_block_group(trans, dir);
1874
1875 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1876 if (err) {
1877 err = -ENOSPC;
1878 goto out_unlock;
1879 }
1880
1881 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1882 dentry->d_name.len,
1883 dentry->d_parent->d_inode->i_ino, objectid,
1884 BTRFS_I(dir)->block_group, mode);
1885 err = PTR_ERR(inode);
1886 if (IS_ERR(inode))
1887 goto out_unlock;
1888
1889 btrfs_set_trans_block_group(trans, inode);
1890 err = btrfs_add_nondir(trans, dentry, inode, 0);
1891 if (err)
1892 drop_inode = 1;
1893 else {
1894 inode->i_op = &btrfs_special_inode_operations;
1895 init_special_inode(inode, inode->i_mode, rdev);
1896 btrfs_update_inode(trans, root, inode);
1897 }
1898 dir->i_sb->s_dirt = 1;
1899 btrfs_update_inode_block_group(trans, inode);
1900 btrfs_update_inode_block_group(trans, dir);
1901 out_unlock:
1902 nr = trans->blocks_used;
1903 btrfs_end_transaction(trans, root);
1904 fail:
1905 mutex_unlock(&root->fs_info->fs_mutex);
1906
1907 if (drop_inode) {
1908 inode_dec_link_count(inode);
1909 iput(inode);
1910 }
1911 btrfs_btree_balance_dirty(root, nr);
1912 btrfs_throttle(root);
1913 return err;
1914 }
1915
1916 static int btrfs_create(struct inode *dir, struct dentry *dentry,
1917 int mode, struct nameidata *nd)
1918 {
1919 struct btrfs_trans_handle *trans;
1920 struct btrfs_root *root = BTRFS_I(dir)->root;
1921 struct inode *inode = NULL;
1922 int err;
1923 int drop_inode = 0;
1924 unsigned long nr = 0;
1925 u64 objectid;
1926
1927 mutex_lock(&root->fs_info->fs_mutex);
1928 err = btrfs_check_free_space(root, 1, 0);
1929 if (err)
1930 goto fail;
1931 trans = btrfs_start_transaction(root, 1);
1932 btrfs_set_trans_block_group(trans, dir);
1933
1934 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1935 if (err) {
1936 err = -ENOSPC;
1937 goto out_unlock;
1938 }
1939
1940 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1941 dentry->d_name.len,
1942 dentry->d_parent->d_inode->i_ino,
1943 objectid, BTRFS_I(dir)->block_group, mode);
1944 err = PTR_ERR(inode);
1945 if (IS_ERR(inode))
1946 goto out_unlock;
1947
1948 btrfs_set_trans_block_group(trans, inode);
1949 err = btrfs_add_nondir(trans, dentry, inode, 0);
1950 if (err)
1951 drop_inode = 1;
1952 else {
1953 inode->i_mapping->a_ops = &btrfs_aops;
1954 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
1955 inode->i_fop = &btrfs_file_operations;
1956 inode->i_op = &btrfs_file_inode_operations;
1957 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1958 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1959 inode->i_mapping, GFP_NOFS);
1960 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1961 inode->i_mapping, GFP_NOFS);
1962 BTRFS_I(inode)->delalloc_bytes = 0;
1963 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
1964 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
1965 }
1966 dir->i_sb->s_dirt = 1;
1967 btrfs_update_inode_block_group(trans, inode);
1968 btrfs_update_inode_block_group(trans, dir);
1969 out_unlock:
1970 nr = trans->blocks_used;
1971 btrfs_end_transaction(trans, root);
1972 fail:
1973 mutex_unlock(&root->fs_info->fs_mutex);
1974
1975 if (drop_inode) {
1976 inode_dec_link_count(inode);
1977 iput(inode);
1978 }
1979 btrfs_btree_balance_dirty(root, nr);
1980 btrfs_throttle(root);
1981 return err;
1982 }
1983
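/*
 * link: bump the link count, take an extra reference on the inode for
 * the new dentry and insert the additional directory entry (this time
 * with an inode backref).
 */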
1984 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
1985 struct dentry *dentry)
1986 {
1987 struct btrfs_trans_handle *trans;
1988 struct btrfs_root *root = BTRFS_I(dir)->root;
1989 struct inode *inode = old_dentry->d_inode;
1990 unsigned long nr = 0;
1991 int err;
1992 int drop_inode = 0;
1993
1994 if (inode->i_nlink == 0)
1995 return -ENOENT;
1996
1997 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1998 inode->i_nlink++;
1999 #else
2000 inc_nlink(inode);
2001 #endif
2002 mutex_lock(&root->fs_info->fs_mutex);
2003 err = btrfs_check_free_space(root, 1, 0);
2004 if (err)
2005 goto fail;
2006 trans = btrfs_start_transaction(root, 1);
2007
2008 btrfs_set_trans_block_group(trans, dir);
2009 atomic_inc(&inode->i_count);
2010 err = btrfs_add_nondir(trans, dentry, inode, 1);
2011
2012 if (err)
2013 drop_inode = 1;
2014
2015 dir->i_sb->s_dirt = 1;
2016 btrfs_update_inode_block_group(trans, dir);
2017 err = btrfs_update_inode(trans, root, inode);
2018
2019 if (err)
2020 drop_inode = 1;
2021
2022 nr = trans->blocks_used;
2023 btrfs_end_transaction(trans, root);
2024 fail:
2025 mutex_unlock(&root->fs_info->fs_mutex);
2026
2027 if (drop_inode) {
2028 inode_dec_link_count(inode);
2029 iput(inode);
2030 }
2031 btrfs_btree_balance_dirty(root, nr);
2032 btrfs_throttle(root);
2033 return err;
2034 }
2035
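/*
 * mkdir: create the directory inode, write out its empty item and add
 * the entry in the parent.  drop_on_err makes sure a partially created
 * inode is dropped if a later step fails.
 */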
2036 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2037 {
2038 struct inode *inode = NULL;
2039 struct btrfs_trans_handle *trans;
2040 struct btrfs_root *root = BTRFS_I(dir)->root;
2041 int err = 0;
2042 int drop_on_err = 0;
2043 u64 objectid = 0;
2044 unsigned long nr = 1;
2045
2046 mutex_lock(&root->fs_info->fs_mutex);
2047 err = btrfs_check_free_space(root, 1, 0);
2048 if (err)
2049 goto out_unlock;
2050
2051 	trans = btrfs_start_transaction(root, 1);
2052 	if (IS_ERR(trans)) {
2053 		err = PTR_ERR(trans);
2054 		goto out_unlock;
2055 	}
2056 
2057 	btrfs_set_trans_block_group(trans, dir);
2058
2059 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
2060 if (err) {
2061 err = -ENOSPC;
2062 goto out_unlock;
2063 }
2064
2065 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
2066 dentry->d_name.len,
2067 dentry->d_parent->d_inode->i_ino, objectid,
2068 BTRFS_I(dir)->block_group, S_IFDIR | mode);
2069 if (IS_ERR(inode)) {
2070 err = PTR_ERR(inode);
2071 goto out_fail;
2072 }
2073
2074 drop_on_err = 1;
2075 inode->i_op = &btrfs_dir_inode_operations;
2076 inode->i_fop = &btrfs_dir_file_operations;
2077 btrfs_set_trans_block_group(trans, inode);
2078
2079 inode->i_size = 0;
2080 err = btrfs_update_inode(trans, root, inode);
2081 if (err)
2082 goto out_fail;
2083
2084 err = btrfs_add_link(trans, dentry, inode, 0);
2085 if (err)
2086 goto out_fail;
2087
2088 d_instantiate(dentry, inode);
2089 drop_on_err = 0;
2090 dir->i_sb->s_dirt = 1;
2091 btrfs_update_inode_block_group(trans, inode);
2092 btrfs_update_inode_block_group(trans, dir);
2093
2094 out_fail:
2095 nr = trans->blocks_used;
2096 btrfs_end_transaction(trans, root);
2097
2098 out_unlock:
2099 mutex_unlock(&root->fs_info->fs_mutex);
2100 if (drop_on_err)
2101 iput(inode);
2102 btrfs_btree_balance_dirty(root, nr);
2103 btrfs_throttle(root);
2104 return err;
2105 }
2106
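/*
 * Called when add_extent_mapping() hit an overlapping entry: verify the
 * existing map and the new one describe the same bytes, then replace
 * the existing map with one covering the merged range.  Mismatches are
 * reported and returned as -EIO.
 */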
2107 static int merge_extent_mapping(struct extent_map_tree *em_tree,
2108 struct extent_map *existing,
2109 struct extent_map *em)
2110 {
2111 u64 start_diff;
2112 u64 new_end;
2113 int ret = 0;
2114 int real_blocks = existing->block_start < EXTENT_MAP_LAST_BYTE;
2115
2116 if (real_blocks && em->block_start >= EXTENT_MAP_LAST_BYTE)
2117 goto invalid;
2118
2119 if (!real_blocks && em->block_start != existing->block_start)
2120 goto invalid;
2121
2122 new_end = max(existing->start + existing->len, em->start + em->len);
2123
2124 if (existing->start >= em->start) {
2125 if (em->start + em->len < existing->start)
2126 goto invalid;
2127
2128 start_diff = existing->start - em->start;
2129 if (real_blocks && em->block_start + start_diff !=
2130 existing->block_start)
2131 goto invalid;
2132
2133 em->len = new_end - em->start;
2134
2135 remove_extent_mapping(em_tree, existing);
2136 		/* drop the reference the tree held on the existing map */
2137 free_extent_map(existing);
2138 ret = add_extent_mapping(em_tree, em);
2139
2140 } else if (em->start > existing->start) {
2141
2142 if (existing->start + existing->len < em->start)
2143 goto invalid;
2144
2145 start_diff = em->start - existing->start;
2146 if (real_blocks && existing->block_start + start_diff !=
2147 em->block_start)
2148 goto invalid;
2149
2150 remove_extent_mapping(em_tree, existing);
2151 em->block_start = existing->block_start;
2152 em->start = existing->start;
2153 em->len = new_end - existing->start;
2154 free_extent_map(existing);
2155
2156 ret = add_extent_mapping(em_tree, em);
2157 } else {
2158 goto invalid;
2159 }
2160 return ret;
2161
2162 invalid:
2163 printk("invalid extent map merge [%Lu %Lu %Lu] [%Lu %Lu %Lu]\n",
2164 existing->start, existing->len, existing->block_start,
2165 em->start, em->len, em->block_start);
2166 return -EIO;
2167 }
2168
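/*
 * btrfs_get_extent: map a byte range of a file to an extent_map.  The
 * per-inode extent map cache is tried first; on a miss the file extent
 * item is looked up in the btree.  Inline extents can be copied into
 * the supplied page (or written back from it when @create is set, which
 * requires joining a transaction).  Ranges with no extent come back as
 * EXTENT_MAP_HOLE.
 */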
2169 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
2170 size_t pg_offset, u64 start, u64 len,
2171 int create)
2172 {
2173 int ret;
2174 int err = 0;
2175 u64 bytenr;
2176 u64 extent_start = 0;
2177 u64 extent_end = 0;
2178 u64 objectid = inode->i_ino;
2179 u32 found_type;
2180 struct btrfs_path *path;
2181 struct btrfs_root *root = BTRFS_I(inode)->root;
2182 struct btrfs_file_extent_item *item;
2183 struct extent_buffer *leaf;
2184 struct btrfs_key found_key;
2185 struct extent_map *em = NULL;
2186 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2187 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2188 struct btrfs_trans_handle *trans = NULL;
2189
2190 path = btrfs_alloc_path();
2191 BUG_ON(!path);
2192 mutex_lock(&root->fs_info->fs_mutex);
2193
2194 again:
2195 spin_lock(&em_tree->lock);
2196 em = lookup_extent_mapping(em_tree, start, len);
2197 if (em)
2198 em->bdev = root->fs_info->fs_devices->latest_bdev;
2199 spin_unlock(&em_tree->lock);
2200
2201 if (em) {
2202 if (em->start > start || em->start + em->len <= start)
2203 free_extent_map(em);
2204 else if (em->block_start == EXTENT_MAP_INLINE && page)
2205 free_extent_map(em);
2206 else
2207 goto out;
2208 }
2209 em = alloc_extent_map(GFP_NOFS);
2210 if (!em) {
2211 err = -ENOMEM;
2212 goto out;
2213 }
2214
2215 em->start = EXTENT_MAP_HOLE;
2216 em->len = (u64)-1;
2217 em->bdev = root->fs_info->fs_devices->latest_bdev;
2218 ret = btrfs_lookup_file_extent(trans, root, path,
2219 objectid, start, trans != NULL);
2220 if (ret < 0) {
2221 err = ret;
2222 goto out;
2223 }
2224
2225 if (ret != 0) {
2226 if (path->slots[0] == 0)
2227 goto not_found;
2228 path->slots[0]--;
2229 }
2230
2231 leaf = path->nodes[0];
2232 item = btrfs_item_ptr(leaf, path->slots[0],
2233 struct btrfs_file_extent_item);
2234 /* are we inside the extent that was found? */
2235 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2236 found_type = btrfs_key_type(&found_key);
2237 if (found_key.objectid != objectid ||
2238 found_type != BTRFS_EXTENT_DATA_KEY) {
2239 goto not_found;
2240 }
2241
2242 found_type = btrfs_file_extent_type(leaf, item);
2243 extent_start = found_key.offset;
2244 if (found_type == BTRFS_FILE_EXTENT_REG) {
2245 extent_end = extent_start +
2246 btrfs_file_extent_num_bytes(leaf, item);
2247 err = 0;
2248 if (start < extent_start || start >= extent_end) {
2249 em->start = start;
2250 if (start < extent_start) {
2251 if (start + len <= extent_start)
2252 goto not_found;
2253 em->len = extent_end - extent_start;
2254 } else {
2255 em->len = len;
2256 }
2257 goto not_found_em;
2258 }
2259 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
2260 if (bytenr == 0) {
2261 em->start = extent_start;
2262 em->len = extent_end - extent_start;
2263 em->block_start = EXTENT_MAP_HOLE;
2264 goto insert;
2265 }
2266 bytenr += btrfs_file_extent_offset(leaf, item);
2267 em->block_start = bytenr;
2268 em->start = extent_start;
2269 em->len = extent_end - extent_start;
2270 goto insert;
2271 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
2272 u64 page_start;
2273 unsigned long ptr;
2274 char *map;
2275 size_t size;
2276 size_t extent_offset;
2277 size_t copy_size;
2278
2279 size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
2280 path->slots[0]));
2281 extent_end = (extent_start + size + root->sectorsize - 1) &
2282 ~((u64)root->sectorsize - 1);
2283 if (start < extent_start || start >= extent_end) {
2284 em->start = start;
2285 if (start < extent_start) {
2286 if (start + len <= extent_start)
2287 goto not_found;
2288 em->len = extent_end - extent_start;
2289 } else {
2290 em->len = len;
2291 }
2292 goto not_found_em;
2293 }
2294 em->block_start = EXTENT_MAP_INLINE;
2295
2296 if (!page) {
2297 em->start = extent_start;
2298 em->len = size;
2299 goto out;
2300 }
2301
2302 page_start = page_offset(page) + pg_offset;
2303 extent_offset = page_start - extent_start;
2304 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
2305 size - extent_offset);
2306 em->start = extent_start + extent_offset;
2307 em->len = (copy_size + root->sectorsize - 1) &
2308 ~((u64)root->sectorsize - 1);
2309 map = kmap(page);
2310 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
2311 if (create == 0 && !PageUptodate(page)) {
2312 read_extent_buffer(leaf, map + pg_offset, ptr,
2313 copy_size);
2314 flush_dcache_page(page);
2315 } else if (create && PageUptodate(page)) {
2316 if (!trans) {
2317 kunmap(page);
2318 free_extent_map(em);
2319 em = NULL;
2320 btrfs_release_path(root, path);
2321 trans = btrfs_start_transaction(root, 1);
2322 goto again;
2323 }
2324 write_extent_buffer(leaf, map + pg_offset, ptr,
2325 copy_size);
2326 btrfs_mark_buffer_dirty(leaf);
2327 }
2328 kunmap(page);
2329 set_extent_uptodate(io_tree, em->start,
2330 extent_map_end(em) - 1, GFP_NOFS);
2331 goto insert;
2332 } else {
2333 		printk("unknown found_type %d\n", found_type);
2334 WARN_ON(1);
2335 }
2336 not_found:
2337 em->start = start;
2338 em->len = len;
2339 not_found_em:
2340 em->block_start = EXTENT_MAP_HOLE;
2341 insert:
2342 btrfs_release_path(root, path);
2343 if (em->start > start || extent_map_end(em) <= start) {
2344 		printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n",
		       em->start, em->len, start, len);
2345 err = -EIO;
2346 goto out;
2347 }
2348
2349 err = 0;
2350 spin_lock(&em_tree->lock);
2351 ret = add_extent_mapping(em_tree, em);
2352 /* it is possible that someone inserted the extent into the tree
2353 * while we had the lock dropped. It is also possible that
2354 * an overlapping map exists in the tree
2355 */
2356 if (ret == -EEXIST) {
2357 struct extent_map *existing;
2358 existing = lookup_extent_mapping(em_tree, start, len);
2359 if (existing && (existing->start > start ||
2360 existing->start + existing->len <= start)) {
2361 free_extent_map(existing);
2362 existing = NULL;
2363 }
2364 if (!existing) {
2365 existing = lookup_extent_mapping(em_tree, em->start,
2366 em->len);
2367 if (existing) {
2368 err = merge_extent_mapping(em_tree, existing,
2369 em);
2370 free_extent_map(existing);
2371 if (err) {
2372 free_extent_map(em);
2373 em = NULL;
2374 }
2375 } else {
2376 err = -EIO;
2377 printk("failing to insert %Lu %Lu\n",
2378 start, len);
2379 free_extent_map(em);
2380 em = NULL;
2381 }
2382 } else {
2383 free_extent_map(em);
2384 em = existing;
2385 }
2386 }
2387 spin_unlock(&em_tree->lock);
2388 out:
2389 btrfs_free_path(path);
2390 if (trans) {
2391 ret = btrfs_end_transaction(trans, root);
2392 if (!err)
2393 err = ret;
2394 }
2395 mutex_unlock(&root->fs_info->fs_mutex);
2396 if (err) {
2397 free_extent_map(em);
2398 WARN_ON(1);
2399 return ERR_PTR(err);
2400 }
2401 return em;
2402 }
2403
2404 #if 0 /* waiting for O_DIRECT reads */
2405 static int btrfs_get_block(struct inode *inode, sector_t iblock,
2406 struct buffer_head *bh_result, int create)
2407 {
2408 struct extent_map *em;
2409 u64 start = (u64)iblock << inode->i_blkbits;
2410 struct btrfs_multi_bio *multi = NULL;
2411 struct btrfs_root *root = BTRFS_I(inode)->root;
2412 u64 len;
2413 u64 logical;
2414 u64 map_length;
2415 int ret = 0;
2416
2417 em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0);
2418
2419 if (!em || IS_ERR(em))
2420 goto out;
2421
2422 if (em->start > start || em->start + em->len <= start) {
2423 goto out;
2424 }
2425
2426 if (em->block_start == EXTENT_MAP_INLINE) {
2427 ret = -EINVAL;
2428 goto out;
2429 }
2430
2431 len = em->start + em->len - start;
2432 len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
2433
2434 if (em->block_start == EXTENT_MAP_HOLE ||
2435 em->block_start == EXTENT_MAP_DELALLOC) {
2436 bh_result->b_size = len;
2437 goto out;
2438 }
2439
2440 logical = start - em->start;
2441 logical = em->block_start + logical;
2442
2443 map_length = len;
2444 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
2445 logical, &map_length, &multi, 0);
2446 BUG_ON(ret);
2447 bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits;
2448 bh_result->b_size = min(map_length, len);
2449
2450 bh_result->b_bdev = multi->stripes[0].dev->bdev;
2451 set_buffer_mapped(bh_result);
2452 kfree(multi);
2453 out:
2454 free_extent_map(em);
2455 return ret;
2456 }
2457 #endif
2458
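/* O_DIRECT is not supported yet; the blockdev_direct_IO path is stubbed out. */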
2459 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
2460 const struct iovec *iov, loff_t offset,
2461 unsigned long nr_segs)
2462 {
2463 return -EINVAL;
2464 #if 0
2465 struct file *file = iocb->ki_filp;
2466 struct inode *inode = file->f_mapping->host;
2467
2468 if (rw == WRITE)
2469 return -EINVAL;
2470
2471 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
2472 offset, nr_segs, btrfs_get_block, NULL);
2473 #endif
2474 }
2475
2476 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
2477 {
2478 return extent_bmap(mapping, iblock, btrfs_get_extent);
2479 }
2480
2481 int btrfs_readpage(struct file *file, struct page *page)
2482 {
2483 struct extent_io_tree *tree;
2484 tree = &BTRFS_I(page->mapping->host)->io_tree;
2485 return extent_read_full_page(tree, page, btrfs_get_extent);
2486 }
2487
2488 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
2489 {
2490 struct extent_io_tree *tree;
2491
2492
2493 if (current->flags & PF_MEMALLOC) {
2494 redirty_page_for_writepage(wbc, page);
2495 unlock_page(page);
2496 return 0;
2497 }
2498 tree = &BTRFS_I(page->mapping->host)->io_tree;
2499 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
2500 }
2501
2502 static int btrfs_writepages(struct address_space *mapping,
2503 struct writeback_control *wbc)
2504 {
2505 struct extent_io_tree *tree;
2506 tree = &BTRFS_I(mapping->host)->io_tree;
2507 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
2508 }
2509
2510 static int
2511 btrfs_readpages(struct file *file, struct address_space *mapping,
2512 struct list_head *pages, unsigned nr_pages)
2513 {
2514 struct extent_io_tree *tree;
2515 tree = &BTRFS_I(mapping->host)->io_tree;
2516 return extent_readpages(tree, mapping, pages, nr_pages,
2517 btrfs_get_extent);
2518 }
2519
2520 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
2521 {
2522 struct extent_io_tree *tree;
2523 struct extent_map_tree *map;
2524 int ret;
2525
2526 tree = &BTRFS_I(page->mapping->host)->io_tree;
2527 map = &BTRFS_I(page->mapping->host)->extent_tree;
2528 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
2529 if (ret == 1) {
2530 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
2531 ClearPagePrivate(page);
2532 set_page_private(page, 0);
2533 page_cache_release(page);
2534 }
2535 return ret;
2536 }
2537
2538 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
2539 {
2540 struct extent_io_tree *tree;
2541
2542 tree = &BTRFS_I(page->mapping->host)->io_tree;
2543 extent_invalidatepage(tree, page, offset);
2544 btrfs_releasepage(page, GFP_NOFS);
2545 if (PagePrivate(page)) {
2546 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
2547 ClearPagePrivate(page);
2548 set_page_private(page, 0);
2549 page_cache_release(page);
2550 }
2551 }
2552
2553 /*
2554 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
2555 * called from a page fault handler when a page is first dirtied. Hence we must
2556 * be careful to check for EOF conditions here. We set the page up correctly
2557 * for a written page which means we get ENOSPC checking when writing into
2558 * holes and correct delalloc and unwritten extent mapping on filesystems that
2559 * support these features.
2560 *
2561 * We are not allowed to take the i_mutex here so we have to play games to
2562 * protect against truncate races as the page could now be beyond EOF. Because
2563 * vmtruncate() writes the inode size before removing pages, once we have the
2564 * page lock we can determine safely if the page is beyond EOF. If it is not
2565 * beyond EOF, then the page is guaranteed safe against truncation until we
2566 * unlock the page.
2567 */
2568 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
2569 {
2570 struct inode *inode = fdentry(vma->vm_file)->d_inode;
2571 struct btrfs_root *root = BTRFS_I(inode)->root;
2572 unsigned long end;
2573 loff_t size;
2574 int ret;
2575 u64 page_start;
2576
2577 mutex_lock(&root->fs_info->fs_mutex);
2578 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
2579 mutex_unlock(&root->fs_info->fs_mutex);
2580 if (ret)
2581 goto out;
2582
2583 ret = -EINVAL;
2584
2585 lock_page(page);
2586 wait_on_page_writeback(page);
2587 size = i_size_read(inode);
2588 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2589
2590 if ((page->mapping != inode->i_mapping) ||
2591 (page_start > size)) {
2592 /* page got truncated out from underneath us */
2593 goto out_unlock;
2594 }
2595
2596 /* page is wholly or partially inside EOF */
2597 if (page_start + PAGE_CACHE_SIZE > size)
2598 end = size & ~PAGE_CACHE_MASK;
2599 else
2600 end = PAGE_CACHE_SIZE;
2601
2602 ret = btrfs_cow_one_page(inode, page, end);
2603
2604 out_unlock:
2605 unlock_page(page);
2606 out:
2607 return ret;
2608 }
2609
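/*
 * truncate: deal with the partial page at the new EOF, then drop the
 * file extent items beyond i_size inside a transaction and write the
 * inode back.
 */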
2610 static void btrfs_truncate(struct inode *inode)
2611 {
2612 struct btrfs_root *root = BTRFS_I(inode)->root;
2613 int ret;
2614 struct btrfs_trans_handle *trans;
2615 unsigned long nr;
2616
2617 if (!S_ISREG(inode->i_mode))
2618 return;
2619 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2620 return;
2621
2622 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2623
2624 mutex_lock(&root->fs_info->fs_mutex);
2625 trans = btrfs_start_transaction(root, 1);
2626 btrfs_set_trans_block_group(trans, inode);
2627
2628 /* FIXME, add redo link to tree so we don't leak on crash */
2629 ret = btrfs_truncate_in_trans(trans, root, inode,
2630 BTRFS_EXTENT_DATA_KEY);
2631 btrfs_update_inode(trans, root, inode);
2632 nr = trans->blocks_used;
2633
2634 ret = btrfs_end_transaction(trans, root);
2635 BUG_ON(ret);
2636 mutex_unlock(&root->fs_info->fs_mutex);
2637 btrfs_btree_balance_dirty(root, nr);
2638 btrfs_throttle(root);
2639 }
2640
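/*
 * Create a new empty subvolume: allocate a fresh tree block and root
 * item, insert it into the tree of tree roots, add the directory entry
 * and inode ref at the filesystem root, then create the top level
 * directory inode of the new root in a second transaction.
 */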
2641 static int noinline create_subvol(struct btrfs_root *root, char *name,
2642 int namelen)
2643 {
2644 struct btrfs_trans_handle *trans;
2645 struct btrfs_key key;
2646 struct btrfs_root_item root_item;
2647 struct btrfs_inode_item *inode_item;
2648 struct extent_buffer *leaf;
2649 struct btrfs_root *new_root = root;
2650 struct inode *inode;
2651 struct inode *dir;
2652 int ret;
2653 int err;
2654 u64 objectid;
2655 u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID;
2656 unsigned long nr = 1;
2657
2658 mutex_lock(&root->fs_info->fs_mutex);
2659 ret = btrfs_check_free_space(root, 1, 0);
2660 if (ret)
2661 goto fail_commit;
2662
2663 trans = btrfs_start_transaction(root, 1);
2664 BUG_ON(!trans);
2665
2666 ret = btrfs_find_free_objectid(trans, root->fs_info->tree_root,
2667 0, &objectid);
2668 if (ret)
2669 goto fail;
2670
2671 leaf = __btrfs_alloc_free_block(trans, root, root->leafsize,
2672 objectid, trans->transid, 0, 0,
2673 0, 0);
2674 if (IS_ERR(leaf))
2675 return PTR_ERR(leaf);
2676
2677 btrfs_set_header_nritems(leaf, 0);
2678 btrfs_set_header_level(leaf, 0);
2679 btrfs_set_header_bytenr(leaf, leaf->start);
2680 btrfs_set_header_generation(leaf, trans->transid);
2681 btrfs_set_header_owner(leaf, objectid);
2682
2683 write_extent_buffer(leaf, root->fs_info->fsid,
2684 (unsigned long)btrfs_header_fsid(leaf),
2685 BTRFS_FSID_SIZE);
2686 btrfs_mark_buffer_dirty(leaf);
2687
2688 inode_item = &root_item.inode;
2689 memset(inode_item, 0, sizeof(*inode_item));
2690 inode_item->generation = cpu_to_le64(1);
2691 inode_item->size = cpu_to_le64(3);
2692 inode_item->nlink = cpu_to_le32(1);
2693 inode_item->nblocks = cpu_to_le64(1);
2694 inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
2695
2696 btrfs_set_root_bytenr(&root_item, leaf->start);
2697 btrfs_set_root_level(&root_item, 0);
2698 btrfs_set_root_refs(&root_item, 1);
2699 btrfs_set_root_used(&root_item, 0);
2700
2701 memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
2702 root_item.drop_level = 0;
2703
2704 free_extent_buffer(leaf);
2705 leaf = NULL;
2706
2707 btrfs_set_root_dirid(&root_item, new_dirid);
2708
2709 key.objectid = objectid;
2710 key.offset = 1;
2711 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
2712 ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
2713 &root_item);
2714 if (ret)
2715 goto fail;
2716
2717 /*
2718 * insert the directory item
2719 */
2720 key.offset = (u64)-1;
2721 dir = root->fs_info->sb->s_root->d_inode;
2722 ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
2723 name, namelen, dir->i_ino, &key,
2724 BTRFS_FT_DIR);
2725 if (ret)
2726 goto fail;
2727
2728 ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
2729 name, namelen, objectid,
2730 root->fs_info->sb->s_root->d_inode->i_ino);
2731 if (ret)
2732 goto fail;
2733
2734 ret = btrfs_commit_transaction(trans, root);
2735 if (ret)
2736 goto fail_commit;
2737
2738 new_root = btrfs_read_fs_root(root->fs_info, &key, name, namelen);
2739 BUG_ON(!new_root);
2740
2741 trans = btrfs_start_transaction(new_root, 1);
2742 BUG_ON(!trans);
2743
2744 inode = btrfs_new_inode(trans, new_root, "..", 2, new_dirid,
2745 new_dirid,
2746 BTRFS_I(dir)->block_group, S_IFDIR | 0700);
2747 	if (IS_ERR(inode)) {
2748 		ret = PTR_ERR(inode);
		goto fail;
	}
2749 inode->i_op = &btrfs_dir_inode_operations;
2750 inode->i_fop = &btrfs_dir_file_operations;
2751 new_root->inode = inode;
2752
2753 ret = btrfs_insert_inode_ref(trans, new_root, "..", 2, new_dirid,
2754 new_dirid);
2755 inode->i_nlink = 1;
2756 inode->i_size = 0;
2757 ret = btrfs_update_inode(trans, new_root, inode);
2758 if (ret)
2759 goto fail;
2760 fail:
2761 nr = trans->blocks_used;
2762 err = btrfs_commit_transaction(trans, new_root);
2763 if (err && !ret)
2764 ret = err;
2765 fail_commit:
2766 mutex_unlock(&root->fs_info->fs_mutex);
2767 btrfs_btree_balance_dirty(root, nr);
2768 btrfs_throttle(root);
2769 return ret;
2770 }
2771
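/*
 * Snapshots are taken at transaction commit time: queue a
 * btrfs_pending_snapshot on the running transaction and commit it.
 */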
2772 static int create_snapshot(struct btrfs_root *root, char *name, int namelen)
2773 {
2774 struct btrfs_pending_snapshot *pending_snapshot;
2775 struct btrfs_trans_handle *trans;
2776 int ret;
2777 int err;
2778 unsigned long nr = 0;
2779
2780 if (!root->ref_cows)
2781 return -EINVAL;
2782
2783 mutex_lock(&root->fs_info->fs_mutex);
2784 ret = btrfs_check_free_space(root, 1, 0);
2785 if (ret)
2786 goto fail_unlock;
2787
2788 pending_snapshot = kmalloc(sizeof(*pending_snapshot), GFP_NOFS);
2789 if (!pending_snapshot) {
2790 ret = -ENOMEM;
2791 goto fail_unlock;
2792 }
2793 pending_snapshot->name = kmalloc(namelen + 1, GFP_NOFS);
2794 if (!pending_snapshot->name) {
2795 ret = -ENOMEM;
2796 kfree(pending_snapshot);
2797 goto fail_unlock;
2798 }
2799 memcpy(pending_snapshot->name, name, namelen);
2800 pending_snapshot->name[namelen] = '\0';
2801 trans = btrfs_start_transaction(root, 1);
2802 BUG_ON(!trans);
2803 pending_snapshot->root = root;
2804 list_add(&pending_snapshot->list,
2805 &trans->transaction->pending_snapshots);
2806 ret = btrfs_update_inode(trans, root, root->inode);
2807 err = btrfs_commit_transaction(trans, root);
2808
2809 fail_unlock:
2810 mutex_unlock(&root->fs_info->fs_mutex);
2811 btrfs_btree_balance_dirty(root, nr);
2812 btrfs_throttle(root);
2813 return ret;
2814 }
2815
2816 unsigned long btrfs_force_ra(struct address_space *mapping,
2817 struct file_ra_state *ra, struct file *file,
2818 pgoff_t offset, pgoff_t last_index)
2819 {
2820 pgoff_t req_size = last_index - offset + 1;
2821
2822 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2823 offset = page_cache_readahead(mapping, ra, file, offset, req_size);
2824 return offset;
2825 #else
2826 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
2827 return offset + req_size;
2828 #endif
2829 }
2830
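/*
 * Defrag: walk every page of the file (with readahead), mark each range
 * delalloc and redirty the pages so that writeback rewrites the data
 * through the allocator.
 */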
2831 int btrfs_defrag_file(struct file *file)
{
2832 struct inode *inode = fdentry(file)->d_inode;
2833 struct btrfs_root *root = BTRFS_I(inode)->root;
2834 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2835 struct page *page;
2836 unsigned long last_index;
2837 unsigned long ra_pages = root->fs_info->bdi.ra_pages;
2838 unsigned long total_read = 0;
2839 u64 page_start;
2840 u64 page_end;
2841 unsigned long i;
2842 int ret;
2843
2844 mutex_lock(&root->fs_info->fs_mutex);
2845 ret = btrfs_check_free_space(root, inode->i_size, 0);
2846 mutex_unlock(&root->fs_info->fs_mutex);
2847 if (ret)
2848 return -ENOSPC;
2849
2850 mutex_lock(&inode->i_mutex);
2851 last_index = inode->i_size >> PAGE_CACHE_SHIFT;
2852 for (i = 0; i <= last_index; i++) {
2853 if (total_read % ra_pages == 0) {
2854 btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i,
2855 min(last_index, i + ra_pages - 1));
2856 }
2857 total_read++;
2858 page = grab_cache_page(inode->i_mapping, i);
2859 if (!page)
2860 goto out_unlock;
2861 if (!PageUptodate(page)) {
2862 btrfs_readpage(NULL, page);
2863 lock_page(page);
2864 if (!PageUptodate(page)) {
2865 unlock_page(page);
2866 page_cache_release(page);
2867 goto out_unlock;
2868 }
2869 }
2870
2871 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
2872 ClearPageDirty(page);
2873 #else
2874 cancel_dirty_page(page, PAGE_CACHE_SIZE);
2875 #endif
2876 wait_on_page_writeback(page);
2877 set_page_extent_mapped(page);
2878
2879 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2880 page_end = page_start + PAGE_CACHE_SIZE - 1;
2881
2882 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2883 set_extent_delalloc(io_tree, page_start,
2884 page_end, GFP_NOFS);
2885
2886 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2887 set_page_dirty(page);
2888 unlock_page(page);
2889 page_cache_release(page);
2890 balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1);
2891 }
2892
2893 out_unlock:
2894 mutex_unlock(&inode->i_mutex);
2895 return 0;
2896 }
2897
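/*
 * Resize ioctl.  The argument string is "[devid:]size", where size may
 * be "max" or carry a +/- prefix for a relative change.  Growing a
 * device happens inside a transaction via btrfs_grow_device(), shrinking
 * via btrfs_shrink_device().  Sizes are rounded down to a sector
 * multiple and must stay between 256MB and the device size.
 */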
2898 static int btrfs_ioctl_resize(struct btrfs_root *root, void __user *arg)
2899 {
2900 u64 new_size;
2901 u64 old_size;
2902 u64 devid = 1;
2903 struct btrfs_ioctl_vol_args *vol_args;
2904 struct btrfs_trans_handle *trans;
2905 struct btrfs_device *device = NULL;
2906 char *sizestr;
2907 char *devstr = NULL;
2908 int ret = 0;
2909 int namelen;
2910 int mod = 0;
2911
2912 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
2913
2914 if (!vol_args)
2915 return -ENOMEM;
2916
2917 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
2918 ret = -EFAULT;
2919 goto out;
2920 }
2921 namelen = strlen(vol_args->name);
2922 if (namelen > BTRFS_VOL_NAME_MAX) {
2923 ret = -EINVAL;
2924 goto out;
2925 }
2926
2927 mutex_lock(&root->fs_info->fs_mutex);
2928 sizestr = vol_args->name;
2929 devstr = strchr(sizestr, ':');
2930 if (devstr) {
2931 char *end;
2932 sizestr = devstr + 1;
2933 *devstr = '\0';
2934 devstr = vol_args->name;
2935 devid = simple_strtoull(devstr, &end, 10);
2936 printk("resizing devid %Lu\n", devid);
2937 }
2938 device = btrfs_find_device(root, devid, NULL);
2939 if (!device) {
2940 printk("resizer unable to find device %Lu\n", devid);
2941 ret = -EINVAL;
2942 goto out_unlock;
2943 }
2944 if (!strcmp(sizestr, "max"))
2945 new_size = device->bdev->bd_inode->i_size;
2946 else {
2947 if (sizestr[0] == '-') {
2948 mod = -1;
2949 sizestr++;
2950 } else if (sizestr[0] == '+') {
2951 mod = 1;
2952 sizestr++;
2953 }
2954 new_size = btrfs_parse_size(sizestr);
2955 if (new_size == 0) {
2956 ret = -EINVAL;
2957 goto out_unlock;
2958 }
2959 }
2960
2961 old_size = device->total_bytes;
2962
2963 if (mod < 0) {
2964 if (new_size > old_size) {
2965 ret = -EINVAL;
2966 goto out_unlock;
2967 }
2968 new_size = old_size - new_size;
2969 } else if (mod > 0) {
2970 new_size = old_size + new_size;
2971 }
2972
2973 if (new_size < 256 * 1024 * 1024) {
2974 ret = -EINVAL;
2975 goto out_unlock;
2976 }
2977 if (new_size > device->bdev->bd_inode->i_size) {
2978 ret = -EFBIG;
2979 goto out_unlock;
2980 }
2981
2982 do_div(new_size, root->sectorsize);
2983 new_size *= root->sectorsize;
2984
2985 printk("new size for %s is %llu\n", device->name, (unsigned long long)new_size);
2986
2987 if (new_size > old_size) {
2988 trans = btrfs_start_transaction(root, 1);
2989 ret = btrfs_grow_device(trans, device, new_size);
2990 btrfs_commit_transaction(trans, root);
2991 } else {
2992 ret = btrfs_shrink_device(device, new_size);
2993 }
2994
2995 out_unlock:
2996 mutex_unlock(&root->fs_info->fs_mutex);
2997 out:
2998 kfree(vol_args);
2999 return ret;
3000 }
3001
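/*
 * Snapshot/subvolume creation ioctl: validate the name, make sure no
 * entry with that name exists in the top level directory, then create
 * either a subvolume (when called on the tree root) or a snapshot of
 * the current root.
 */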
3002 static int noinline btrfs_ioctl_snap_create(struct btrfs_root *root,
3003 void __user *arg)
3004 {
3005 struct btrfs_ioctl_vol_args *vol_args;
3006 struct btrfs_dir_item *di;
3007 struct btrfs_path *path;
3008 u64 root_dirid;
3009 int namelen;
3010 int ret;
3011
3012 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
3013
3014 if (!vol_args)
3015 return -ENOMEM;
3016
3017 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
3018 ret = -EFAULT;
3019 goto out;
3020 }
3021
3022 namelen = strlen(vol_args->name);
3023 if (namelen > BTRFS_VOL_NAME_MAX) {
3024 ret = -EINVAL;
3025 goto out;
3026 }
3027 if (strchr(vol_args->name, '/')) {
3028 ret = -EINVAL;
3029 goto out;
3030 }
3031
3032 path = btrfs_alloc_path();
3033 if (!path) {
3034 ret = -ENOMEM;
3035 goto out;
3036 }
3037
3038 	root_dirid = root->fs_info->sb->s_root->d_inode->i_ino;
3039 mutex_lock(&root->fs_info->fs_mutex);
3040 di = btrfs_lookup_dir_item(NULL, root->fs_info->tree_root,
3041 path, root_dirid,
3042 vol_args->name, namelen, 0);
3043 mutex_unlock(&root->fs_info->fs_mutex);
3044 btrfs_free_path(path);
3045
3046 if (di && !IS_ERR(di)) {
3047 ret = -EEXIST;
3048 goto out;
3049 }
3050
3051 if (IS_ERR(di)) {
3052 ret = PTR_ERR(di);
3053 goto out;
3054 }
3055
3056 if (root == root->fs_info->tree_root)
3057 ret = create_subvol(root, vol_args->name, namelen);
3058 else
3059 ret = create_snapshot(root, vol_args->name, namelen);
3060 out:
3061 kfree(vol_args);
3062 return ret;
3063 }
3064
3065 static int btrfs_ioctl_defrag(struct file *file)
3066 {
3067 struct inode *inode = fdentry(file)->d_inode;
3068 struct btrfs_root *root = BTRFS_I(inode)->root;
3069
3070 switch (inode->i_mode & S_IFMT) {
3071 case S_IFDIR:
3072 mutex_lock(&root->fs_info->fs_mutex);
3073 btrfs_defrag_root(root, 0);
3074 btrfs_defrag_root(root->fs_info->extent_root, 0);
3075 mutex_unlock(&root->fs_info->fs_mutex);
3076 break;
3077 case S_IFREG:
3078 btrfs_defrag_file(file);
3079 break;
3080 }
3081
3082 return 0;
3083 }
3084
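/* Add-device ioctl: copy the device path from user space and hand it to btrfs_init_new_device(). */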
3085 long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
3086 {
3087 struct btrfs_ioctl_vol_args *vol_args;
3088 int ret;
3089
3090 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
3091
3092 if (!vol_args)
3093 return -ENOMEM;
3094
3095 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
3096 ret = -EFAULT;
3097 goto out;
3098 }
3099 ret = btrfs_init_new_device(root, vol_args->name);
3100
3101 out:
3102 kfree(vol_args);
3103 return ret;
3104 }
3105
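/*
 * Remove-device ioctl: copy the device path from user space and let
 * btrfs_rm_device() take the device out of the filesystem.
 */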
3106 long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
3107 {
3108 struct btrfs_ioctl_vol_args *vol_args;
3109 int ret;
3110
3111 vol_args = kmalloc(sizeof(*vol_args), GFP_NOFS);
3112
3113 if (!vol_args)
3114 return -ENOMEM;
3115
3116 if (copy_from_user(vol_args, arg, sizeof(*vol_args))) {
3117 ret = -EFAULT;
3118 goto out;
3119 }
3120 ret = btrfs_rm_device(root, vol_args->name);
3121
3122 out:
3123 kfree(vol_args);
3124 return ret;
3125 }
3126
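/*
 * Copy the item at @slot verbatim, re-keyed under @destino.  Used by
 * the clone ioctl for inline extents and csum items.
 */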
3127 int dup_item_to_inode(struct btrfs_trans_handle *trans,
3128 struct btrfs_root *root,
3129 struct btrfs_path *path,
3130 struct extent_buffer *leaf,
3131 int slot,
3132 struct btrfs_key *key,
3133 u64 destino)
3134 {
3135 char *dup;
3136 int len = btrfs_item_size_nr(leaf, slot);
3137 struct btrfs_key ckey = *key;
3138 int ret = 0;
3139
3140 dup = kmalloc(len, GFP_NOFS);
3141 if (!dup)
3142 return -ENOMEM;
3143
3144 read_extent_buffer(leaf, dup, btrfs_item_ptr_offset(leaf, slot), len);
3145 btrfs_release_path(root, path);
3146
3147 ckey.objectid = destino;
3148 ret = btrfs_insert_item(trans, root, &ckey, dup, len);
3149 kfree(dup);
3150 return ret;
3151 }
3152
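/*
 * Clone ioctl: share the contents of src_fd with the (empty) target
 * file.  Both files must live on the same filesystem.  Regular extents
 * are re-inserted as file extent items pointing at the same disk bytes,
 * taking an extra extent ref; inline extents and csum items are simply
 * duplicated.
 */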
3153 long btrfs_ioctl_clone(struct file *file, unsigned long src_fd)
3154 {
3155 struct inode *inode = fdentry(file)->d_inode;
3156 struct btrfs_root *root = BTRFS_I(inode)->root;
3157 struct file *src_file;
3158 struct inode *src;
3159 struct btrfs_trans_handle *trans;
3160 int ret;
3161 u64 pos;
3162 struct btrfs_path *path;
3163 struct btrfs_key key;
3164 struct extent_buffer *leaf;
3165 u32 nritems;
3166 int slot;
3167
3168 src_file = fget(src_fd);
3169 if (!src_file)
3170 return -EBADF;
3171 src = src_file->f_dentry->d_inode;
3172
3173 ret = -EXDEV;
3174 if (src->i_sb != inode->i_sb)
3175 goto out_fput;
3176
3177 if (inode < src) {
3178 mutex_lock(&inode->i_mutex);
3179 mutex_lock(&src->i_mutex);
3180 } else {
3181 mutex_lock(&src->i_mutex);
3182 mutex_lock(&inode->i_mutex);
3183 }
3184
3185 ret = -ENOTEMPTY;
3186 if (inode->i_size)
3187 goto out_unlock;
3188
3189 /* do any pending delalloc/csum calc on src, one way or
3190 another, and lock file content */
3191 while (1) {
3192 filemap_write_and_wait(src->i_mapping);
3193 lock_extent(&BTRFS_I(src)->io_tree, 0, (u64)-1, GFP_NOFS);
3194 if (BTRFS_I(src)->delalloc_bytes == 0)
3195 break;
3196 unlock_extent(&BTRFS_I(src)->io_tree, 0, (u64)-1, GFP_NOFS);
3197 }
3198
3199 mutex_lock(&root->fs_info->fs_mutex);
3200 trans = btrfs_start_transaction(root, 0);
3201 path = btrfs_alloc_path();
3202 if (!path) {
3203 ret = -ENOMEM;
3204 goto out;
3205 }
3206 key.offset = 0;
3207 key.type = BTRFS_EXTENT_DATA_KEY;
3208 key.objectid = src->i_ino;
3209 pos = 0;
3210 path->reada = 2;
3211
3212 while (1) {
3213 /*
3214 * note the key will change type as we walk through the
3215 * tree.
3216 */
3217 ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
3218 if (ret < 0)
3219 goto out;
3220
3221 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
3222 ret = btrfs_next_leaf(root, path);
3223 if (ret < 0)
3224 goto out;
3225 if (ret > 0)
3226 break;
3227 }
3228 leaf = path->nodes[0];
3229 slot = path->slots[0];
3230 btrfs_item_key_to_cpu(leaf, &key, slot);
3231 nritems = btrfs_header_nritems(leaf);
3232
3233 if (btrfs_key_type(&key) > BTRFS_CSUM_ITEM_KEY ||
3234 key.objectid != src->i_ino)
3235 break;
3236
3237 if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) {
3238 struct btrfs_file_extent_item *extent;
3239 int found_type;
3240 pos = key.offset;
3241 extent = btrfs_item_ptr(leaf, slot,
3242 struct btrfs_file_extent_item);
3243 found_type = btrfs_file_extent_type(leaf, extent);
3244 if (found_type == BTRFS_FILE_EXTENT_REG) {
3245 u64 len = btrfs_file_extent_num_bytes(leaf,
3246 extent);
3247 u64 ds = btrfs_file_extent_disk_bytenr(leaf,
3248 extent);
3249 u64 dl = btrfs_file_extent_disk_num_bytes(leaf,
3250 extent);
3251 u64 off = btrfs_file_extent_offset(leaf,
3252 extent);
3253 btrfs_insert_file_extent(trans, root,
3254 inode->i_ino, pos,
3255 ds, dl, len, off);
3256 /* ds == 0 means there's a hole */
3257 if (ds != 0) {
3258 btrfs_inc_extent_ref(trans, root,
3259 ds, dl,
3260 root->root_key.objectid,
3261 trans->transid,
3262 inode->i_ino, pos);
3263 }
3264 pos = key.offset + len;
3265 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
3266 ret = dup_item_to_inode(trans, root, path,
3267 leaf, slot, &key,
3268 inode->i_ino);
3269 if (ret)
3270 goto out;
3271 pos = key.offset + btrfs_item_size_nr(leaf,
3272 slot);
3273 }
3274 } else if (btrfs_key_type(&key) == BTRFS_CSUM_ITEM_KEY) {
3275 ret = dup_item_to_inode(trans, root, path, leaf,
3276 slot, &key, inode->i_ino);
3277
3278 if (ret)
3279 goto out;
3280 }
3281 key.offset++;
3282 btrfs_release_path(root, path);
3283 }
3284
3285 ret = 0;
3286 out:
3287 btrfs_free_path(path);
3288
3289 inode->i_blocks = src->i_blocks;
3290 i_size_write(inode, src->i_size);
3291 btrfs_update_inode(trans, root, inode);
3292
3293 unlock_extent(&BTRFS_I(src)->io_tree, 0, (u64)-1, GFP_NOFS);
3294
3295 btrfs_end_transaction(trans, root);
3296 mutex_unlock(&root->fs_info->fs_mutex);
3297
3298 out_unlock:
3299 mutex_unlock(&src->i_mutex);
3300 mutex_unlock(&inode->i_mutex);
3301 out_fput:
3302 fput(src_file);
3303 return ret;
3304 }
3305
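/* Dispatch the btrfs specific ioctls for files and directories. */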
3306 long btrfs_ioctl(struct file *file, unsigned int
3307 cmd, unsigned long arg)
3308 {
3309 struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
3310
3311 switch (cmd) {
3312 case BTRFS_IOC_SNAP_CREATE:
3313 return btrfs_ioctl_snap_create(root, (void __user *)arg);
3314 case BTRFS_IOC_DEFRAG:
3315 return btrfs_ioctl_defrag(file);
3316 case BTRFS_IOC_RESIZE:
3317 return btrfs_ioctl_resize(root, (void __user *)arg);
3318 case BTRFS_IOC_ADD_DEV:
3319 return btrfs_ioctl_add_dev(root, (void __user *)arg);
3320 case BTRFS_IOC_RM_DEV:
3321 return btrfs_ioctl_rm_dev(root, (void __user *)arg);
3322 case BTRFS_IOC_BALANCE:
3323 return btrfs_balance(root->fs_info->dev_root);
3324 case BTRFS_IOC_CLONE:
3325 return btrfs_ioctl_clone(file, arg);
3326 }
3327
3328 return -ENOTTY;
3329 }
3330
3331 /*
3332 * Called inside transaction, so use GFP_NOFS
3333 */
3334 struct inode *btrfs_alloc_inode(struct super_block *sb)
3335 {
3336 struct btrfs_inode *ei;
3337
3338 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
3339 if (!ei)
3340 return NULL;
3341 ei->last_trans = 0;
3342 ei->ordered_trans = 0;
3343 return &ei->vfs_inode;
3344 }
3345
3346 void btrfs_destroy_inode(struct inode *inode)
3347 {
3348 WARN_ON(!list_empty(&inode->i_dentry));
3349 WARN_ON(inode->i_data.nrpages);
3350
3351 btrfs_drop_extent_cache(inode, 0, (u64)-1);
3352 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
3353 }
3354
3355 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
3356 static void init_once(struct kmem_cache * cachep, void *foo)
3357 #else
3358 static void init_once(void * foo, struct kmem_cache * cachep,
3359 unsigned long flags)
3360 #endif
3361 {
3362 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
3363
3364 inode_init_once(&ei->vfs_inode);
3365 }
3366
3367 void btrfs_destroy_cachep(void)
3368 {
3369 if (btrfs_inode_cachep)
3370 kmem_cache_destroy(btrfs_inode_cachep);
3371 if (btrfs_trans_handle_cachep)
3372 kmem_cache_destroy(btrfs_trans_handle_cachep);
3373 if (btrfs_transaction_cachep)
3374 kmem_cache_destroy(btrfs_transaction_cachep);
3375 if (btrfs_bit_radix_cachep)
3376 kmem_cache_destroy(btrfs_bit_radix_cachep);
3377 if (btrfs_path_cachep)
3378 kmem_cache_destroy(btrfs_path_cachep);
3379 }
3380
3381 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
3382 unsigned long extra_flags,
3383 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
3384 void (*ctor)(struct kmem_cache *, void *)
3385 #else
3386 void (*ctor)(void *, struct kmem_cache *,
3387 unsigned long)
3388 #endif
3389 )
3390 {
3391 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
3392 SLAB_MEM_SPREAD | extra_flags), ctor
3393 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
3394 ,NULL
3395 #endif
3396 );
3397 }
3398
3399 int btrfs_init_cachep(void)
3400 {
3401 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
3402 sizeof(struct btrfs_inode),
3403 0, init_once);
3404 if (!btrfs_inode_cachep)
3405 goto fail;
3406 btrfs_trans_handle_cachep =
3407 btrfs_cache_create("btrfs_trans_handle_cache",
3408 sizeof(struct btrfs_trans_handle),
3409 0, NULL);
3410 if (!btrfs_trans_handle_cachep)
3411 goto fail;
3412 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
3413 sizeof(struct btrfs_transaction),
3414 0, NULL);
3415 if (!btrfs_transaction_cachep)
3416 goto fail;
3417 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
3418 sizeof(struct btrfs_path),
3419 0, NULL);
3420 if (!btrfs_path_cachep)
3421 goto fail;
3422 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
3423 SLAB_DESTROY_BY_RCU, NULL);
3424 if (!btrfs_bit_radix_cachep)
3425 goto fail;
3426 return 0;
3427 fail:
3428 btrfs_destroy_cachep();
3429 return -ENOMEM;
3430 }
3431
3432 static int btrfs_getattr(struct vfsmount *mnt,
3433 struct dentry *dentry, struct kstat *stat)
3434 {
3435 struct inode *inode = dentry->d_inode;
3436 generic_fillattr(inode, stat);
3437 stat->blksize = PAGE_CACHE_SIZE;
3438 stat->blocks = inode->i_blocks + (BTRFS_I(inode)->delalloc_bytes >> 9);
3439 return 0;
3440 }
3441
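/*
 * rename: implemented as unlink of the old entry (and of the victim, if
 * the target exists) followed by a fresh link under the new name, all
 * inside one transaction.  The i_nlink bump on the old inode balances
 * the drop done by the unlink step.
 */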
3442 static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
3443 struct inode * new_dir,struct dentry *new_dentry)
3444 {
3445 struct btrfs_trans_handle *trans;
3446 struct btrfs_root *root = BTRFS_I(old_dir)->root;
3447 struct inode *new_inode = new_dentry->d_inode;
3448 struct inode *old_inode = old_dentry->d_inode;
3449 struct timespec ctime = CURRENT_TIME;
3450 struct btrfs_path *path;
3451 int ret;
3452
3453 if (S_ISDIR(old_inode->i_mode) && new_inode &&
3454 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
3455 return -ENOTEMPTY;
3456 }
3457
3458 mutex_lock(&root->fs_info->fs_mutex);
3459 ret = btrfs_check_free_space(root, 1, 0);
3460 if (ret)
3461 goto out_unlock;
3462
3463 trans = btrfs_start_transaction(root, 1);
3464
3465 btrfs_set_trans_block_group(trans, new_dir);
3466 path = btrfs_alloc_path();
3467 if (!path) {
3468 ret = -ENOMEM;
3469 goto out_fail;
3470 }
3471
3472 old_dentry->d_inode->i_nlink++;
3473 old_dir->i_ctime = old_dir->i_mtime = ctime;
3474 new_dir->i_ctime = new_dir->i_mtime = ctime;
3475 old_inode->i_ctime = ctime;
3476
3477 ret = btrfs_unlink_trans(trans, root, old_dir, old_dentry);
3478 if (ret)
3479 goto out_fail;
3480
3481 if (new_inode) {
3482 new_inode->i_ctime = CURRENT_TIME;
3483 ret = btrfs_unlink_trans(trans, root, new_dir, new_dentry);
3484 if (ret)
3485 goto out_fail;
3486 }
3487 ret = btrfs_add_link(trans, new_dentry, old_inode, 1);
3488 if (ret)
3489 goto out_fail;
3490
3491 out_fail:
3492 btrfs_free_path(path);
3493 btrfs_end_transaction(trans, root);
3494 out_unlock:
3495 mutex_unlock(&root->fs_info->fs_mutex);
3496 return ret;
3497 }
3498
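/*
 * symlink: the inode is created like a regular file, the target string
 * is stored as a single inline file extent, and the inode is then
 * switched over to the symlink operations.
 */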
3499 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
3500 const char *symname)
3501 {
3502 struct btrfs_trans_handle *trans;
3503 struct btrfs_root *root = BTRFS_I(dir)->root;
3504 struct btrfs_path *path;
3505 struct btrfs_key key;
3506 struct inode *inode = NULL;
3507 int err;
3508 int drop_inode = 0;
3509 u64 objectid;
3510 int name_len;
3511 int datasize;
3512 unsigned long ptr;
3513 struct btrfs_file_extent_item *ei;
3514 struct extent_buffer *leaf;
3515 unsigned long nr = 0;
3516
3517 name_len = strlen(symname) + 1;
3518 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
3519 return -ENAMETOOLONG;
3520
3521 mutex_lock(&root->fs_info->fs_mutex);
3522 err = btrfs_check_free_space(root, 1, 0);
3523 if (err)
3524 goto out_fail;
3525
3526 trans = btrfs_start_transaction(root, 1);
3527 btrfs_set_trans_block_group(trans, dir);
3528
3529 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3530 if (err) {
3531 err = -ENOSPC;
3532 goto out_unlock;
3533 }
3534
3535 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
3536 dentry->d_name.len,
3537 dentry->d_parent->d_inode->i_ino, objectid,
3538 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO);
3539 err = PTR_ERR(inode);
3540 if (IS_ERR(inode))
3541 goto out_unlock;
3542
3543 btrfs_set_trans_block_group(trans, inode);
3544 err = btrfs_add_nondir(trans, dentry, inode, 0);
3545 if (err)
3546 drop_inode = 1;
3547 else {
3548 inode->i_mapping->a_ops = &btrfs_aops;
3549 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3550 inode->i_fop = &btrfs_file_operations;
3551 inode->i_op = &btrfs_file_inode_operations;
3552 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3553 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3554 inode->i_mapping, GFP_NOFS);
3555 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3556 inode->i_mapping, GFP_NOFS);
3557 BTRFS_I(inode)->delalloc_bytes = 0;
3558 atomic_set(&BTRFS_I(inode)->ordered_writeback, 0);
3559 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3560 }
3561 dir->i_sb->s_dirt = 1;
3562 btrfs_update_inode_block_group(trans, inode);
3563 btrfs_update_inode_block_group(trans, dir);
3564 if (drop_inode)
3565 goto out_unlock;
3566
3567 path = btrfs_alloc_path();
3568 BUG_ON(!path);
3569 key.objectid = inode->i_ino;
3570 key.offset = 0;
3571 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
3572 datasize = btrfs_file_extent_calc_inline_size(name_len);
3573 err = btrfs_insert_empty_item(trans, root, path, &key,
3574 datasize);
3575 if (err) {
3576 drop_inode = 1;
3577 goto out_unlock;
3578 }
3579 leaf = path->nodes[0];
3580 ei = btrfs_item_ptr(leaf, path->slots[0],
3581 struct btrfs_file_extent_item);
3582 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
3583 btrfs_set_file_extent_type(leaf, ei,
3584 BTRFS_FILE_EXTENT_INLINE);
3585 ptr = btrfs_file_extent_inline_start(ei);
3586 write_extent_buffer(leaf, symname, ptr, name_len);
3587 btrfs_mark_buffer_dirty(leaf);
3588 btrfs_free_path(path);
3589
3590 inode->i_op = &btrfs_symlink_inode_operations;
3591 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3592 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3593 inode->i_size = name_len - 1;
3594 err = btrfs_update_inode(trans, root, inode);
3595 if (err)
3596 drop_inode = 1;
3597
3598 out_unlock:
3599 nr = trans->blocks_used;
3600 btrfs_end_transaction(trans, root);
3601 out_fail:
3602 mutex_unlock(&root->fs_info->fs_mutex);
3603 if (drop_inode) {
3604 inode_dec_link_count(inode);
3605 iput(inode);
3606 }
3607 btrfs_btree_balance_dirty(root, nr);
3608 btrfs_throttle(root);
3609 return err;
3610 }
3611
3612 static int btrfs_permission(struct inode *inode, int mask,
3613 struct nameidata *nd)
3614 {
3615 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
3616 return -EACCES;
3617 return generic_permission(inode, mask, NULL);
3618 }
3619
3620 static struct inode_operations btrfs_dir_inode_operations = {
3621 .lookup = btrfs_lookup,
3622 .create = btrfs_create,
3623 .unlink = btrfs_unlink,
3624 .link = btrfs_link,
3625 .mkdir = btrfs_mkdir,
3626 .rmdir = btrfs_rmdir,
3627 .rename = btrfs_rename,
3628 .symlink = btrfs_symlink,
3629 .setattr = btrfs_setattr,
3630 .mknod = btrfs_mknod,
3631 .setxattr = generic_setxattr,
3632 .getxattr = generic_getxattr,
3633 .listxattr = btrfs_listxattr,
3634 .removexattr = generic_removexattr,
3635 .permission = btrfs_permission,
3636 };
3637 static struct inode_operations btrfs_dir_ro_inode_operations = {
3638 .lookup = btrfs_lookup,
3639 .permission = btrfs_permission,
3640 };
3641 static struct file_operations btrfs_dir_file_operations = {
3642 .llseek = generic_file_llseek,
3643 .read = generic_read_dir,
3644 .readdir = btrfs_readdir,
3645 .unlocked_ioctl = btrfs_ioctl,
3646 #ifdef CONFIG_COMPAT
3647 .compat_ioctl = btrfs_ioctl,
3648 #endif
3649 };
3650
3651 static struct extent_io_ops btrfs_extent_io_ops = {
3652 .fill_delalloc = run_delalloc_range,
3653 .submit_bio_hook = btrfs_submit_bio_hook,
3654 .merge_bio_hook = btrfs_merge_bio_hook,
3655 .readpage_io_hook = btrfs_readpage_io_hook,
3656 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
3657 .readpage_io_failed_hook = btrfs_readpage_io_failed_hook,
3658 .set_bit_hook = btrfs_set_bit_hook,
3659 .clear_bit_hook = btrfs_clear_bit_hook,
3660 };
3661
3662 static struct address_space_operations btrfs_aops = {
3663 .readpage = btrfs_readpage,
3664 .writepage = btrfs_writepage,
3665 .writepages = btrfs_writepages,
3666 .readpages = btrfs_readpages,
3667 .sync_page = block_sync_page,
3668 .bmap = btrfs_bmap,
3669 .direct_IO = btrfs_direct_IO,
3670 .invalidatepage = btrfs_invalidatepage,
3671 .releasepage = btrfs_releasepage,
3672 .set_page_dirty = __set_page_dirty_nobuffers,
3673 };
3674
3675 static struct address_space_operations btrfs_symlink_aops = {
3676 .readpage = btrfs_readpage,
3677 .writepage = btrfs_writepage,
3678 .invalidatepage = btrfs_invalidatepage,
3679 .releasepage = btrfs_releasepage,
3680 };
3681
3682 static struct inode_operations btrfs_file_inode_operations = {
3683 .truncate = btrfs_truncate,
3684 .getattr = btrfs_getattr,
3685 .setattr = btrfs_setattr,
3686 .setxattr = generic_setxattr,
3687 .getxattr = generic_getxattr,
3688 .listxattr = btrfs_listxattr,
3689 .removexattr = generic_removexattr,
3690 .permission = btrfs_permission,
3691 };
3692 static struct inode_operations btrfs_special_inode_operations = {
3693 .getattr = btrfs_getattr,
3694 .setattr = btrfs_setattr,
3695 .permission = btrfs_permission,
3696 };
3697 static struct inode_operations btrfs_symlink_inode_operations = {
3698 .readlink = generic_readlink,
3699 .follow_link = page_follow_link_light,
3700 .put_link = page_put_link,
3701 .permission = btrfs_permission,
3702 };