Btrfs: New data=ordered implementation
fs/btrfs/inode.c (deliverable/linux.git)
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/smp_lock.h>
30 #include <linux/backing-dev.h>
31 #include <linux/mpage.h>
32 #include <linux/swap.h>
33 #include <linux/writeback.h>
34 #include <linux/statfs.h>
35 #include <linux/compat.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/version.h>
38 #include <linux/xattr.h>
39 #include "ctree.h"
40 #include "disk-io.h"
41 #include "transaction.h"
42 #include "btrfs_inode.h"
43 #include "ioctl.h"
44 #include "print-tree.h"
45 #include "volumes.h"
46 #include "ordered-data.h"
47
48 struct btrfs_iget_args {
49 u64 ino;
50 struct btrfs_root *root;
51 };
52
53 static struct inode_operations btrfs_dir_inode_operations;
54 static struct inode_operations btrfs_symlink_inode_operations;
55 static struct inode_operations btrfs_dir_ro_inode_operations;
56 static struct inode_operations btrfs_special_inode_operations;
57 static struct inode_operations btrfs_file_inode_operations;
58 static struct address_space_operations btrfs_aops;
59 static struct address_space_operations btrfs_symlink_aops;
60 static struct file_operations btrfs_dir_file_operations;
61 static struct extent_io_ops btrfs_extent_io_ops;
62
63 static struct kmem_cache *btrfs_inode_cachep;
64 struct kmem_cache *btrfs_trans_handle_cachep;
65 struct kmem_cache *btrfs_transaction_cachep;
66 struct kmem_cache *btrfs_bit_radix_cachep;
67 struct kmem_cache *btrfs_path_cachep;
68
69 #define S_SHIFT 12
70 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
71 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
72 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
73 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
74 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
75 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
76 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
77 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
78 };
79
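/*
 * a very conservative check to make sure we don't overfill the FS.
 * bytes used plus any outstanding delalloc are compared against a fixed
 * percentage of the total size: 85% normally, 90% for deletes, since
 * deleting may need to allocate space before it can free any.
 */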
80 int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
81 int for_del)
82 {
83 u64 total;
84 u64 used;
85 u64 thresh;
86 unsigned long flags;
87 int ret = 0;
88
89 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
90 total = btrfs_super_total_bytes(&root->fs_info->super_copy);
91 used = btrfs_super_bytes_used(&root->fs_info->super_copy);
92 if (for_del)
93 thresh = total * 90;
94 else
95 thresh = total * 85;
96
97 do_div(thresh, 100);
98
99 if (used + root->fs_info->delalloc_bytes + num_required > thresh)
100 ret = -ENOSPC;
101 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
102 return ret;
103 }
104
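/*
 * cow_file_range reserves disk extents for a dirty delalloc range and
 * records each chunk as an extent_map plus an ordered extent.  with the
 * new data=ordered code the file extent items and checksums are not
 * inserted here; that happens later, when the ordered extent completes
 * in btrfs_writepage_end_io_hook.
 */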
105 static int cow_file_range(struct inode *inode, u64 start, u64 end)
106 {
107 struct btrfs_root *root = BTRFS_I(inode)->root;
108 struct btrfs_trans_handle *trans;
109 u64 alloc_hint = 0;
110 u64 num_bytes;
111 u64 cur_alloc_size;
112 u64 blocksize = root->sectorsize;
113 u64 orig_num_bytes;
114 struct btrfs_key ins;
115 struct extent_map *em;
116 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
117 int ret = 0;
118
119 trans = btrfs_start_transaction(root, 1);
120 BUG_ON(!trans);
121 btrfs_set_trans_block_group(trans, inode);
122
123 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
124 num_bytes = max(blocksize, num_bytes);
125 orig_num_bytes = num_bytes;
126
127 if (alloc_hint == EXTENT_MAP_INLINE)
128 goto out;
129
130 BUG_ON(num_bytes > btrfs_super_total_bytes(&root->fs_info->super_copy));
131 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1);
132
133 while(num_bytes > 0) {
134 cur_alloc_size = min(num_bytes, root->fs_info->max_extent);
135 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
136 root->sectorsize, 0, 0,
137 (u64)-1, &ins, 1);
138 if (ret) {
139 WARN_ON(1);
140 goto out;
141 }
142 em = alloc_extent_map(GFP_NOFS);
143 em->start = start;
144 em->len = ins.offset;
145 em->block_start = ins.objectid;
146 em->bdev = root->fs_info->fs_devices->latest_bdev;
147 while(1) {
148 spin_lock(&em_tree->lock);
149 ret = add_extent_mapping(em_tree, em);
150 spin_unlock(&em_tree->lock);
151 if (ret != -EEXIST) {
152 free_extent_map(em);
153 break;
154 }
155 btrfs_drop_extent_cache(inode, start,
156 start + ins.offset - 1);
157 }
158
159 cur_alloc_size = ins.offset;
160 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
161 ins.offset);
162 BUG_ON(ret);
163 if (num_bytes < cur_alloc_size) {
164 printk("num_bytes %Lu cur_alloc %Lu\n", num_bytes,
165 cur_alloc_size);
166 break;
167 }
168 num_bytes -= cur_alloc_size;
169 alloc_hint = ins.objectid + ins.offset;
170 start += cur_alloc_size;
171 }
172 out:
173 btrfs_end_transaction(trans, root);
174 return ret;
175 }
176
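/*
 * when nodatacow is in use, try to find an existing on-disk extent that
 * covers the dirty range and can safely be written in place: it must not
 * be shared with any snapshot, must point at real disk space and must
 * sit in a writable block group (which also keeps us inside the FS
 * limits while a resize is running).  anything that doesn't qualify
 * falls back to cow_file_range().
 */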
177 static int run_delalloc_nocow(struct inode *inode, u64 start, u64 end)
178 {
179 u64 extent_start;
180 u64 extent_end;
181 u64 bytenr;
182 u64 cow_end;
183 u64 loops = 0;
184 u64 total_fs_bytes;
185 struct btrfs_root *root = BTRFS_I(inode)->root;
186 struct btrfs_block_group_cache *block_group;
187 struct extent_buffer *leaf;
188 int found_type;
189 struct btrfs_path *path;
190 struct btrfs_file_extent_item *item;
191 int ret;
192 int err;
193 struct btrfs_key found_key;
194
195 total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
196 path = btrfs_alloc_path();
197 BUG_ON(!path);
198 again:
199 ret = btrfs_lookup_file_extent(NULL, root, path,
200 inode->i_ino, start, 0);
201 if (ret < 0) {
202 btrfs_free_path(path);
203 return ret;
204 }
205
206 cow_end = end;
207 if (ret != 0) {
208 if (path->slots[0] == 0)
209 goto not_found;
210 path->slots[0]--;
211 }
212
213 leaf = path->nodes[0];
214 item = btrfs_item_ptr(leaf, path->slots[0],
215 struct btrfs_file_extent_item);
216
217 /* are we inside the extent that was found? */
218 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
219 found_type = btrfs_key_type(&found_key);
220 if (found_key.objectid != inode->i_ino ||
221 found_type != BTRFS_EXTENT_DATA_KEY)
222 goto not_found;
223
224 found_type = btrfs_file_extent_type(leaf, item);
225 extent_start = found_key.offset;
226 if (found_type == BTRFS_FILE_EXTENT_REG) {
227 u64 extent_num_bytes;
228
229 extent_num_bytes = btrfs_file_extent_num_bytes(leaf, item);
230 extent_end = extent_start + extent_num_bytes;
231 err = 0;
232
233 if (loops && start != extent_start)
234 goto not_found;
235
236 if (start < extent_start || start >= extent_end)
237 goto not_found;
238
239 cow_end = min(end, extent_end - 1);
240 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
241 if (bytenr == 0)
242 goto not_found;
243
244 if (btrfs_count_snapshots_in_path(root, path, inode->i_ino,
245 bytenr) != 1) {
246 goto not_found;
247 }
248
249 /*
250 * we may be called by the resizer, make sure we're inside
251 * the limits of the FS
252 */
253 block_group = btrfs_lookup_block_group(root->fs_info,
254 bytenr);
255 if (!block_group || block_group->ro)
256 goto not_found;
257
258 start = extent_end;
259 } else {
260 goto not_found;
261 }
262 loop:
263 if (start > end) {
264 btrfs_free_path(path);
265 return 0;
266 }
267 btrfs_release_path(root, path);
268 loops++;
269 goto again;
270
271 not_found:
272 cow_file_range(inode, start, end);
273 start = end + 1;
274 goto loop;
275 }
276
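/*
 * dispatch a dirty delalloc range to either the nocow path or the
 * regular cow path, depending on the mount options and inode flags.
 */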
277 static int run_delalloc_range(struct inode *inode, u64 start, u64 end)
278 {
279 struct btrfs_root *root = BTRFS_I(inode)->root;
280 int ret;
281
282 if (btrfs_test_opt(root, NODATACOW) ||
283 btrfs_test_flag(inode, NODATACOW))
284 ret = run_delalloc_nocow(inode, start, end);
285 else
286 ret = cow_file_range(inode, start, end);
287
288 return ret;
289 }
290
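/*
 * these two hooks run as EXTENT_DELALLOC bits are set and cleared on the
 * io_tree; they keep the per-inode and per-FS delalloc byte counters in
 * sync with the actual dirty ranges.
 */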
291 int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
292 unsigned long old, unsigned long bits)
293 {
294 unsigned long flags;
295 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
296 struct btrfs_root *root = BTRFS_I(inode)->root;
297 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
298 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
299 root->fs_info->delalloc_bytes += end - start + 1;
300 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
301 }
302 return 0;
303 }
304
305 int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
306 unsigned long old, unsigned long bits)
307 {
308 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
309 struct btrfs_root *root = BTRFS_I(inode)->root;
310 unsigned long flags;
311
312 spin_lock_irqsave(&root->fs_info->delalloc_lock, flags);
313 if (end - start + 1 > root->fs_info->delalloc_bytes) {
314 printk("warning: delalloc account %Lu %Lu\n",
315 end - start + 1, root->fs_info->delalloc_bytes);
316 root->fs_info->delalloc_bytes = 0;
317 BTRFS_I(inode)->delalloc_bytes = 0;
318 } else {
319 root->fs_info->delalloc_bytes -= end - start + 1;
320 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
321 }
322 spin_unlock_irqrestore(&root->fs_info->delalloc_lock, flags);
323 }
324 return 0;
325 }
326
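/*
 * before a page is added to a bio, check that the resulting i/o would
 * still map to a single chunk.  returning 1 tells the caller not to
 * merge the page into this bio.
 */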
327 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
328 size_t size, struct bio *bio)
329 {
330 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
331 struct btrfs_mapping_tree *map_tree;
332 u64 logical = bio->bi_sector << 9;
333 u64 length = 0;
334 u64 map_length;
335 int ret;
336
337 length = bio->bi_size;
338 map_tree = &root->fs_info->mapping_tree;
339 map_length = length;
340 ret = btrfs_map_block(map_tree, READ, logical,
341 &map_length, NULL, 0);
342
343 if (map_length < length + size) {
344 return 1;
345 }
346 return 0;
347 }
348
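/*
 * the actual write submission path: checksum the bio, attach the sums to
 * the matching ordered extent (they are written into the btree when the
 * ordered extent finishes) and send the bio down to the raid layers.
 */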
349 int __btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
350 int mirror_num)
351 {
352 struct btrfs_root *root = BTRFS_I(inode)->root;
353 int ret = 0;
354 struct btrfs_ordered_sum *sums;
355
356 ret = btrfs_csum_one_bio(root, bio, &sums);
357 BUG_ON(ret);
358
359 ret = btrfs_add_ordered_sum(inode, sums);
360 BUG_ON(ret);
361
362 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
363 }
364
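/*
 * entry point for data bio submission.  reads are mapped directly, while
 * writes are handed to the async submission workqueue so the
 * checksumming in __btrfs_submit_bio_hook happens outside the caller's
 * context.
 */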
365 int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
366 int mirror_num)
367 {
368 struct btrfs_root *root = BTRFS_I(inode)->root;
369 int ret = 0;
370
371 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
372 BUG_ON(ret);
373
374 if (!(rw & (1 << BIO_RW))) {
375 goto mapit;
376 }
377
378 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
379 inode, rw, bio, mirror_num,
380 __btrfs_submit_bio_hook);
381 mapit:
382 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
383 }
384
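/*
 * insert the checksums that were calculated at bio submission time and
 * queued on the ordered extent into the btree.
 */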
385 static int add_pending_csums(struct btrfs_trans_handle *trans,
386 struct inode *inode, u64 file_offset,
387 struct list_head *list)
388 {
389 struct list_head *cur;
390 struct btrfs_ordered_sum *sum;
391
392 btrfs_set_trans_block_group(trans, inode);
393 while(!list_empty(list)) {
394 cur = list->next;
395 sum = list_entry(cur, struct btrfs_ordered_sum, list);
396 mutex_lock(&BTRFS_I(inode)->csum_mutex);
397 btrfs_csum_file_blocks(trans, BTRFS_I(inode)->root,
398 inode, sum);
399 mutex_unlock(&BTRFS_I(inode)->csum_mutex);
400 list_del(&sum->list);
401 kfree(sum);
402 }
403 return 0;
404 }
405
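/*
 * as ordered data i/o finishes, this is called once the whole ordered
 * extent is on disk.  it turns the reserved extent into a real extent
 * item, drops any old file extents in the range, inserts the new file
 * extent item and the pending checksums, and updates the inode.
 */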
406 int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
407 struct extent_state *state, int uptodate)
408 {
409 struct inode *inode = page->mapping->host;
410 struct btrfs_root *root = BTRFS_I(inode)->root;
411 struct btrfs_trans_handle *trans;
412 struct btrfs_ordered_extent *ordered_extent;
413 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
414 u64 alloc_hint = 0;
415 struct list_head list;
416 struct btrfs_key ins;
417 int ret;
418
419 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
420 if (!ret) {
421 return 0;
422 }
423
424 trans = btrfs_start_transaction(root, 1);
425
426 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
427 BUG_ON(!ordered_extent);
428
429 lock_extent(io_tree, ordered_extent->file_offset,
430 ordered_extent->file_offset + ordered_extent->len - 1,
431 GFP_NOFS);
432
433 INIT_LIST_HEAD(&list);
434
435 ins.objectid = ordered_extent->start;
436 ins.offset = ordered_extent->len;
437 ins.type = BTRFS_EXTENT_ITEM_KEY;
438 ret = btrfs_alloc_reserved_extent(trans, root, root->root_key.objectid,
439 trans->transid, inode->i_ino,
440 ordered_extent->file_offset, &ins);
441 BUG_ON(ret);
442 ret = btrfs_drop_extents(trans, root, inode,
443 ordered_extent->file_offset,
444 ordered_extent->file_offset +
445 ordered_extent->len,
446 ordered_extent->file_offset, &alloc_hint);
447 BUG_ON(ret);
448 ret = btrfs_insert_file_extent(trans, root, inode->i_ino,
449 ordered_extent->file_offset,
450 ordered_extent->start,
451 ordered_extent->len,
452 ordered_extent->len, 0);
453 BUG_ON(ret);
454 btrfs_drop_extent_cache(inode, ordered_extent->file_offset,
455 ordered_extent->file_offset +
456 ordered_extent->len - 1);
457 inode->i_blocks += ordered_extent->len >> 9;
458 unlock_extent(io_tree, ordered_extent->file_offset,
459 ordered_extent->file_offset + ordered_extent->len - 1,
460 GFP_NOFS);
461 add_pending_csums(trans, inode, ordered_extent->file_offset,
462 &ordered_extent->list);
463
464 btrfs_remove_ordered_extent(inode, ordered_extent);
465 /* once for us */
466 btrfs_put_ordered_extent(ordered_extent);
467 /* once for the tree */
468 btrfs_put_ordered_extent(ordered_extent);
469
470 btrfs_update_inode(trans, root, inode);
471 btrfs_end_transaction(trans, root);
472 return 0;
473 }
474
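/*
 * before a data read is issued, look up the expected checksum for this
 * block and stash it in the io_tree private field so the read end_io
 * hook can verify the page.
 */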
475 int btrfs_readpage_io_hook(struct page *page, u64 start, u64 end)
476 {
477 int ret = 0;
478 struct inode *inode = page->mapping->host;
479 struct btrfs_root *root = BTRFS_I(inode)->root;
480 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
481 struct btrfs_csum_item *item;
482 struct btrfs_path *path = NULL;
483 u32 csum;
484
485 if (btrfs_test_opt(root, NODATASUM) ||
486 btrfs_test_flag(inode, NODATASUM))
487 return 0;
488
489 path = btrfs_alloc_path();
490 item = btrfs_lookup_csum(NULL, root, path, inode->i_ino, start, 0);
491 if (IS_ERR(item)) {
492 ret = PTR_ERR(item);
493 /* a csum that isn't present is a preallocated region. */
494 if (ret == -ENOENT || ret == -EFBIG)
495 ret = 0;
496 csum = 0;
497 printk("no csum found for inode %lu start %Lu\n", inode->i_ino,
498 start);
499 goto out;
500 }
501 read_extent_buffer(path->nodes[0], &csum, (unsigned long)item,
502 BTRFS_CRC32_SIZE);
503 set_state_private(io_tree, start, csum);
504 out:
505 if (path)
506 btrfs_free_path(path);
507 return ret;
508 }
509
510 struct io_failure_record {
511 struct page *page;
512 u64 start;
513 u64 len;
514 u64 logical;
515 int last_mirror;
516 };
517
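/*
 * when a read fails or a checksum doesn't match, retry the i/o against
 * each of the other mirrors in turn.  the io_failure_tree remembers
 * which mirror was tried last for a given range; once every copy has
 * been tried the error is passed back up.
 */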
518 int btrfs_io_failed_hook(struct bio *failed_bio,
519 struct page *page, u64 start, u64 end,
520 struct extent_state *state)
521 {
522 struct io_failure_record *failrec = NULL;
523 u64 private;
524 struct extent_map *em;
525 struct inode *inode = page->mapping->host;
526 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
527 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
528 struct bio *bio;
529 int num_copies;
530 int ret;
531 int rw;
532 u64 logical;
533
534 ret = get_state_private(failure_tree, start, &private);
535 if (ret) {
536 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
537 if (!failrec)
538 return -ENOMEM;
539 failrec->start = start;
540 failrec->len = end - start + 1;
541 failrec->last_mirror = 0;
542
543 spin_lock(&em_tree->lock);
544 em = lookup_extent_mapping(em_tree, start, failrec->len);
545                 if (em && (em->start > start || em->start + em->len < start)) {
546 free_extent_map(em);
547 em = NULL;
548 }
549 spin_unlock(&em_tree->lock);
550
551 if (!em || IS_ERR(em)) {
552 kfree(failrec);
553 return -EIO;
554 }
555 logical = start - em->start;
556 logical = em->block_start + logical;
557 failrec->logical = logical;
558 free_extent_map(em);
559 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
560 EXTENT_DIRTY, GFP_NOFS);
561 set_state_private(failure_tree, start,
562 (u64)(unsigned long)failrec);
563 } else {
564 failrec = (struct io_failure_record *)(unsigned long)private;
565 }
566 num_copies = btrfs_num_copies(
567 &BTRFS_I(inode)->root->fs_info->mapping_tree,
568 failrec->logical, failrec->len);
569 failrec->last_mirror++;
570 if (!state) {
571 spin_lock_irq(&BTRFS_I(inode)->io_tree.lock);
572 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
573 failrec->start,
574 EXTENT_LOCKED);
575 if (state && state->start != failrec->start)
576 state = NULL;
577 spin_unlock_irq(&BTRFS_I(inode)->io_tree.lock);
578 }
579 if (!state || failrec->last_mirror > num_copies) {
580 set_state_private(failure_tree, failrec->start, 0);
581 clear_extent_bits(failure_tree, failrec->start,
582 failrec->start + failrec->len - 1,
583 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
584 kfree(failrec);
585 return -EIO;
586 }
587 bio = bio_alloc(GFP_NOFS, 1);
588 bio->bi_private = state;
589 bio->bi_end_io = failed_bio->bi_end_io;
590 bio->bi_sector = failrec->logical >> 9;
591 bio->bi_bdev = failed_bio->bi_bdev;
592 bio->bi_size = 0;
593 bio_add_page(bio, page, failrec->len, start - page_offset(page));
594 if (failed_bio->bi_rw & (1 << BIO_RW))
595 rw = WRITE;
596 else
597 rw = READ;
598
599 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
600 failrec->last_mirror);
601 return 0;
602 }
603
604 int btrfs_clean_io_failures(struct inode *inode, u64 start)
605 {
606 u64 private;
607 u64 private_failure;
608 struct io_failure_record *failure;
609 int ret;
610
611 private = 0;
612 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
613 (u64)-1, 1, EXTENT_DIRTY)) {
614 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
615 start, &private_failure);
616 if (ret == 0) {
617 failure = (struct io_failure_record *)(unsigned long)
618 private_failure;
619 set_state_private(&BTRFS_I(inode)->io_failure_tree,
620 failure->start, 0);
621 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
622 failure->start,
623 failure->start + failure->len - 1,
624 EXTENT_DIRTY | EXTENT_LOCKED,
625 GFP_NOFS);
626 kfree(failure);
627 }
628 }
629 return 0;
630 }
631
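/*
 * when a data read completes, verify the page contents against the
 * checksum saved by btrfs_readpage_io_hook.  on a mismatch the page is
 * poisoned and -EIO is returned so the failure hook can try another
 * mirror.
 */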
632 int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
633 struct extent_state *state)
634 {
635 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
636 struct inode *inode = page->mapping->host;
637 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
638 char *kaddr;
639 u64 private = ~(u32)0;
640 int ret;
641 struct btrfs_root *root = BTRFS_I(inode)->root;
642 u32 csum = ~(u32)0;
643 unsigned long flags;
644
645 if (btrfs_test_opt(root, NODATASUM) ||
646 btrfs_test_flag(inode, NODATASUM))
647 return 0;
648 if (state && state->start == start) {
649 private = state->private;
650 ret = 0;
651 } else {
652 ret = get_state_private(io_tree, start, &private);
653 }
654 local_irq_save(flags);
655 kaddr = kmap_atomic(page, KM_IRQ0);
656 if (ret) {
657 goto zeroit;
658 }
659 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
660 btrfs_csum_final(csum, (char *)&csum);
661 if (csum != private) {
662 goto zeroit;
663 }
664 kunmap_atomic(kaddr, KM_IRQ0);
665 local_irq_restore(flags);
666
667 /* if the io failure tree for this inode is non-empty,
668 * check to see if we've recovered from a failed IO
669 */
670 btrfs_clean_io_failures(inode, start);
671 return 0;
672
673 zeroit:
674 printk("btrfs csum failed ino %lu off %llu csum %u private %Lu\n",
675 page->mapping->host->i_ino, (unsigned long long)start, csum,
676 private);
677 memset(kaddr + offset, 1, end - start + 1);
678 flush_dcache_page(page);
679 kunmap_atomic(kaddr, KM_IRQ0);
680 local_irq_restore(flags);
681 if (private == 0)
682 return 0;
683 return -EIO;
684 }
685
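/*
 * read an inode item out of the btree and fill in the vfs inode.
 */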
686 void btrfs_read_locked_inode(struct inode *inode)
687 {
688 struct btrfs_path *path;
689 struct extent_buffer *leaf;
690 struct btrfs_inode_item *inode_item;
691 struct btrfs_timespec *tspec;
692 struct btrfs_root *root = BTRFS_I(inode)->root;
693 struct btrfs_key location;
694 u64 alloc_group_block;
695 u32 rdev;
696 int ret;
697
698 path = btrfs_alloc_path();
699 BUG_ON(!path);
700 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
701
702 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
703 if (ret)
704 goto make_bad;
705
706 leaf = path->nodes[0];
707 inode_item = btrfs_item_ptr(leaf, path->slots[0],
708 struct btrfs_inode_item);
709
710 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
711 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
712 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
713 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
714 inode->i_size = btrfs_inode_size(leaf, inode_item);
715
716 tspec = btrfs_inode_atime(inode_item);
717 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
718 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
719
720 tspec = btrfs_inode_mtime(inode_item);
721 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
722 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
723
724 tspec = btrfs_inode_ctime(inode_item);
725 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
726 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
727
728 inode->i_blocks = btrfs_inode_nblocks(leaf, inode_item);
729 inode->i_generation = btrfs_inode_generation(leaf, inode_item);
730 inode->i_rdev = 0;
731 rdev = btrfs_inode_rdev(leaf, inode_item);
732
733 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
734 BTRFS_I(inode)->block_group = btrfs_lookup_block_group(root->fs_info,
735 alloc_group_block);
736 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
737 if (!BTRFS_I(inode)->block_group) {
738 BTRFS_I(inode)->block_group = btrfs_find_block_group(root,
739 NULL, 0,
740 BTRFS_BLOCK_GROUP_METADATA, 0);
741 }
742 btrfs_free_path(path);
743 inode_item = NULL;
744
745 switch (inode->i_mode & S_IFMT) {
746 case S_IFREG:
747 inode->i_mapping->a_ops = &btrfs_aops;
748 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
749 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
750 inode->i_fop = &btrfs_file_operations;
751 inode->i_op = &btrfs_file_inode_operations;
752 break;
753 case S_IFDIR:
754 inode->i_fop = &btrfs_dir_file_operations;
755 if (root == root->fs_info->tree_root)
756 inode->i_op = &btrfs_dir_ro_inode_operations;
757 else
758 inode->i_op = &btrfs_dir_inode_operations;
759 break;
760 case S_IFLNK:
761 inode->i_op = &btrfs_symlink_inode_operations;
762 inode->i_mapping->a_ops = &btrfs_symlink_aops;
763 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
764 break;
765 default:
766 init_special_inode(inode, inode->i_mode, rdev);
767 break;
768 }
769 return;
770
771 make_bad:
772 btrfs_free_path(path);
773 make_bad_inode(inode);
774 }
775
776 static void fill_inode_item(struct extent_buffer *leaf,
777 struct btrfs_inode_item *item,
778 struct inode *inode)
779 {
780 btrfs_set_inode_uid(leaf, item, inode->i_uid);
781 btrfs_set_inode_gid(leaf, item, inode->i_gid);
782 btrfs_set_inode_size(leaf, item, inode->i_size);
783 btrfs_set_inode_mode(leaf, item, inode->i_mode);
784 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
785
786 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
787 inode->i_atime.tv_sec);
788 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
789 inode->i_atime.tv_nsec);
790
791 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
792 inode->i_mtime.tv_sec);
793 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
794 inode->i_mtime.tv_nsec);
795
796 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
797 inode->i_ctime.tv_sec);
798 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
799 inode->i_ctime.tv_nsec);
800
801 btrfs_set_inode_nblocks(leaf, item, inode->i_blocks);
802 btrfs_set_inode_generation(leaf, item, inode->i_generation);
803 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
804 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
805 btrfs_set_inode_block_group(leaf, item,
806 BTRFS_I(inode)->block_group->key.objectid);
807 }
808
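/*
 * copy the current in-memory inode back into its inode item in the
 * btree and mark the leaf dirty.
 */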
809 int btrfs_update_inode(struct btrfs_trans_handle *trans,
810 struct btrfs_root *root,
811 struct inode *inode)
812 {
813 struct btrfs_inode_item *inode_item;
814 struct btrfs_path *path;
815 struct extent_buffer *leaf;
816 int ret;
817
818 path = btrfs_alloc_path();
819 BUG_ON(!path);
820 ret = btrfs_lookup_inode(trans, root, path,
821 &BTRFS_I(inode)->location, 1);
822 if (ret) {
823 if (ret > 0)
824 ret = -ENOENT;
825 goto failed;
826 }
827
828 leaf = path->nodes[0];
829 inode_item = btrfs_item_ptr(leaf, path->slots[0],
830 struct btrfs_inode_item);
831
832 fill_inode_item(leaf, inode_item, inode);
833 btrfs_mark_buffer_dirty(leaf);
834 btrfs_set_inode_last_trans(trans, inode);
835 ret = 0;
836 failed:
837 btrfs_free_path(path);
838 return ret;
839 }
840
841
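/*
 * unlink helper that runs inside an existing transaction: remove the dir
 * item, the dir index item and the inode back reference, then fix up the
 * directory size, timestamps and the victim's link count.
 */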
842 static int btrfs_unlink_trans(struct btrfs_trans_handle *trans,
843 struct btrfs_root *root,
844 struct inode *dir,
845 struct dentry *dentry)
846 {
847 struct btrfs_path *path;
848 const char *name = dentry->d_name.name;
849 int name_len = dentry->d_name.len;
850 int ret = 0;
851 struct extent_buffer *leaf;
852 struct btrfs_dir_item *di;
853 struct btrfs_key key;
854
855 path = btrfs_alloc_path();
856 if (!path) {
857 ret = -ENOMEM;
858 goto err;
859 }
860
861 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
862 name, name_len, -1);
863 if (IS_ERR(di)) {
864 ret = PTR_ERR(di);
865 goto err;
866 }
867 if (!di) {
868 ret = -ENOENT;
869 goto err;
870 }
871 leaf = path->nodes[0];
872 btrfs_dir_item_key_to_cpu(leaf, di, &key);
873 ret = btrfs_delete_one_dir_name(trans, root, path, di);
874 if (ret)
875 goto err;
876 btrfs_release_path(root, path);
877
878 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
879 key.objectid, name, name_len, -1);
880 if (IS_ERR(di)) {
881 ret = PTR_ERR(di);
882 goto err;
883 }
884 if (!di) {
885 ret = -ENOENT;
886 goto err;
887 }
888 ret = btrfs_delete_one_dir_name(trans, root, path, di);
889 btrfs_release_path(root, path);
890
891 dentry->d_inode->i_ctime = dir->i_ctime;
892 ret = btrfs_del_inode_ref(trans, root, name, name_len,
893 dentry->d_inode->i_ino,
894 dentry->d_parent->d_inode->i_ino);
895 if (ret) {
896 printk("failed to delete reference to %.*s, "
897 "inode %lu parent %lu\n", name_len, name,
898 dentry->d_inode->i_ino,
899 dentry->d_parent->d_inode->i_ino);
900 }
901 err:
902 btrfs_free_path(path);
903 if (!ret) {
904 dir->i_size -= name_len * 2;
905 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
906 btrfs_update_inode(trans, root, dir);
907 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
908 dentry->d_inode->i_nlink--;
909 #else
910 drop_nlink(dentry->d_inode);
911 #endif
912 ret = btrfs_update_inode(trans, root, dentry->d_inode);
913 dir->i_sb->s_dirt = 1;
914 }
915 return ret;
916 }
917
918 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
919 {
920 struct btrfs_root *root;
921 struct btrfs_trans_handle *trans;
922 int ret;
923 unsigned long nr = 0;
924
925 root = BTRFS_I(dir)->root;
926
927 ret = btrfs_check_free_space(root, 1, 1);
928 if (ret)
929 goto fail;
930
931 trans = btrfs_start_transaction(root, 1);
932
933 btrfs_set_trans_block_group(trans, dir);
934 ret = btrfs_unlink_trans(trans, root, dir, dentry);
935 nr = trans->blocks_used;
936
937 btrfs_end_transaction_throttle(trans, root);
938 fail:
939 btrfs_btree_balance_dirty(root, nr);
940 return ret;
941 }
942
943 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
944 {
945 struct inode *inode = dentry->d_inode;
946 int err = 0;
947 int ret;
948 struct btrfs_root *root = BTRFS_I(dir)->root;
949 struct btrfs_trans_handle *trans;
950 unsigned long nr = 0;
951
952 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
953 return -ENOTEMPTY;
954 }
955
956 ret = btrfs_check_free_space(root, 1, 1);
957 if (ret)
958 goto fail;
959
960 trans = btrfs_start_transaction(root, 1);
961 btrfs_set_trans_block_group(trans, dir);
962
963 /* now the directory is empty */
964 err = btrfs_unlink_trans(trans, root, dir, dentry);
965 if (!err) {
966 inode->i_size = 0;
967 }
968
969 nr = trans->blocks_used;
970 ret = btrfs_end_transaction_throttle(trans, root);
971 fail:
972 btrfs_btree_balance_dirty(root, nr);
973
974 if (ret && !err)
975 err = ret;
976 return err;
977 }
978
979 /*
980 * this can truncate away extent items, csum items and directory items.
981 * It starts at a high offset and removes keys until it can't find
982 * any higher than i_size.
983 *
984 * csum items that cross the new i_size are truncated to the new size
985 * as well.
986 */
987 static int btrfs_truncate_in_trans(struct btrfs_trans_handle *trans,
988 struct btrfs_root *root,
989 struct inode *inode,
990 u32 min_type)
991 {
992 int ret;
993 struct btrfs_path *path;
994 struct btrfs_key key;
995 struct btrfs_key found_key;
996 u32 found_type;
997 struct extent_buffer *leaf;
998 struct btrfs_file_extent_item *fi;
999 u64 extent_start = 0;
1000 u64 extent_num_bytes = 0;
1001 u64 item_end = 0;
1002 u64 root_gen = 0;
1003 u64 root_owner = 0;
1004 int found_extent;
1005 int del_item;
1006 int pending_del_nr = 0;
1007 int pending_del_slot = 0;
1008 int extent_type = -1;
1009 u64 mask = root->sectorsize - 1;
1010
1011 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
1012 btrfs_drop_extent_cache(inode, inode->i_size & (~mask), (u64)-1);
1013 path = btrfs_alloc_path();
1014 path->reada = -1;
1015 BUG_ON(!path);
1016
1017 /* FIXME, add redo link to tree so we don't leak on crash */
1018 key.objectid = inode->i_ino;
1019 key.offset = (u64)-1;
1020 key.type = (u8)-1;
1021
1022 btrfs_init_path(path);
1023 search_again:
1024 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1025 if (ret < 0) {
1026 goto error;
1027 }
1028 if (ret > 0) {
1029 BUG_ON(path->slots[0] == 0);
1030 path->slots[0]--;
1031 }
1032
1033 while(1) {
1034 fi = NULL;
1035 leaf = path->nodes[0];
1036 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1037 found_type = btrfs_key_type(&found_key);
1038
1039 if (found_key.objectid != inode->i_ino)
1040 break;
1041
1042 if (found_type < min_type)
1043 break;
1044
1045 item_end = found_key.offset;
1046 if (found_type == BTRFS_EXTENT_DATA_KEY) {
1047 fi = btrfs_item_ptr(leaf, path->slots[0],
1048 struct btrfs_file_extent_item);
1049 extent_type = btrfs_file_extent_type(leaf, fi);
1050 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
1051 item_end +=
1052 btrfs_file_extent_num_bytes(leaf, fi);
1053 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1054 struct btrfs_item *item = btrfs_item_nr(leaf,
1055 path->slots[0]);
1056 item_end += btrfs_file_extent_inline_len(leaf,
1057 item);
1058 }
1059 item_end--;
1060 }
1061 if (found_type == BTRFS_CSUM_ITEM_KEY) {
1062 ret = btrfs_csum_truncate(trans, root, path,
1063 inode->i_size);
1064 BUG_ON(ret);
1065 }
1066 if (item_end < inode->i_size) {
1067 if (found_type == BTRFS_DIR_ITEM_KEY) {
1068 found_type = BTRFS_INODE_ITEM_KEY;
1069 } else if (found_type == BTRFS_EXTENT_ITEM_KEY) {
1070 found_type = BTRFS_CSUM_ITEM_KEY;
1071 } else if (found_type == BTRFS_EXTENT_DATA_KEY) {
1072 found_type = BTRFS_XATTR_ITEM_KEY;
1073 } else if (found_type == BTRFS_XATTR_ITEM_KEY) {
1074 found_type = BTRFS_INODE_REF_KEY;
1075 } else if (found_type) {
1076 found_type--;
1077 } else {
1078 break;
1079 }
1080 btrfs_set_key_type(&key, found_type);
1081 goto next;
1082 }
1083 if (found_key.offset >= inode->i_size)
1084 del_item = 1;
1085 else
1086 del_item = 0;
1087 found_extent = 0;
1088
1089 /* FIXME, shrink the extent if the ref count is only 1 */
1090 if (found_type != BTRFS_EXTENT_DATA_KEY)
1091 goto delete;
1092
1093 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
1094 u64 num_dec;
1095 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
1096 if (!del_item) {
1097 u64 orig_num_bytes =
1098 btrfs_file_extent_num_bytes(leaf, fi);
1099 extent_num_bytes = inode->i_size -
1100 found_key.offset + root->sectorsize - 1;
1101 extent_num_bytes = extent_num_bytes &
1102 ~((u64)root->sectorsize - 1);
1103 btrfs_set_file_extent_num_bytes(leaf, fi,
1104 extent_num_bytes);
1105 num_dec = (orig_num_bytes -
1106 extent_num_bytes);
1107 if (extent_start != 0)
1108 dec_i_blocks(inode, num_dec);
1109 btrfs_mark_buffer_dirty(leaf);
1110 } else {
1111 extent_num_bytes =
1112 btrfs_file_extent_disk_num_bytes(leaf,
1113 fi);
1114 /* FIXME blocksize != 4096 */
1115 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
1116 if (extent_start != 0) {
1117 found_extent = 1;
1118 dec_i_blocks(inode, num_dec);
1119 }
1120 root_gen = btrfs_header_generation(leaf);
1121 root_owner = btrfs_header_owner(leaf);
1122 }
1123 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1124 if (!del_item) {
1125 u32 newsize = inode->i_size - found_key.offset;
1126 dec_i_blocks(inode, item_end + 1 -
1127 found_key.offset - newsize);
1128 newsize =
1129 btrfs_file_extent_calc_inline_size(newsize);
1130 ret = btrfs_truncate_item(trans, root, path,
1131 newsize, 1);
1132 BUG_ON(ret);
1133 } else {
1134 dec_i_blocks(inode, item_end + 1 -
1135 found_key.offset);
1136 }
1137 }
1138 delete:
1139 if (del_item) {
1140 if (!pending_del_nr) {
1141 /* no pending yet, add ourselves */
1142 pending_del_slot = path->slots[0];
1143 pending_del_nr = 1;
1144 } else if (pending_del_nr &&
1145 path->slots[0] + 1 == pending_del_slot) {
1146 /* hop on the pending chunk */
1147 pending_del_nr++;
1148 pending_del_slot = path->slots[0];
1149 } else {
1150 printk("bad pending slot %d pending_del_nr %d pending_del_slot %d\n", path->slots[0], pending_del_nr, pending_del_slot);
1151 }
1152 } else {
1153 break;
1154 }
1155 if (found_extent) {
1156 ret = btrfs_free_extent(trans, root, extent_start,
1157 extent_num_bytes,
1158 root_owner,
1159 root_gen, inode->i_ino,
1160 found_key.offset, 0);
1161 BUG_ON(ret);
1162 }
1163 next:
1164 if (path->slots[0] == 0) {
1165 if (pending_del_nr)
1166 goto del_pending;
1167 btrfs_release_path(root, path);
1168 goto search_again;
1169 }
1170
1171 path->slots[0]--;
1172 if (pending_del_nr &&
1173 path->slots[0] + 1 != pending_del_slot) {
1174 struct btrfs_key debug;
1175 del_pending:
1176 btrfs_item_key_to_cpu(path->nodes[0], &debug,
1177 pending_del_slot);
1178 ret = btrfs_del_items(trans, root, path,
1179 pending_del_slot,
1180 pending_del_nr);
1181 BUG_ON(ret);
1182 pending_del_nr = 0;
1183 btrfs_release_path(root, path);
1184 goto search_again;
1185 }
1186 }
1187 ret = 0;
1188 error:
1189 if (pending_del_nr) {
1190 ret = btrfs_del_items(trans, root, path, pending_del_slot,
1191 pending_del_nr);
1192 }
1193 btrfs_free_path(path);
1194 inode->i_sb->s_dirt = 1;
1195 return ret;
1196 }
1197
1198 /*
1199 * taken from block_truncate_page, but does cow as it zeros out
1200 * any bytes left in the last page in the file.
1201 */
1202 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
1203 {
1204 struct inode *inode = mapping->host;
1205 struct btrfs_root *root = BTRFS_I(inode)->root;
1206 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1207 struct btrfs_ordered_extent *ordered;
1208 char *kaddr;
1209 u32 blocksize = root->sectorsize;
1210 pgoff_t index = from >> PAGE_CACHE_SHIFT;
1211 unsigned offset = from & (PAGE_CACHE_SIZE-1);
1212 struct page *page;
1213 int ret = 0;
1214 u64 page_start;
1215 u64 page_end;
1216
1217 if ((offset & (blocksize - 1)) == 0)
1218 goto out;
1219
1220 ret = -ENOMEM;
1221 again:
1222 page = grab_cache_page(mapping, index);
1223 if (!page)
1224 goto out;
1225
1226 page_start = page_offset(page);
1227 page_end = page_start + PAGE_CACHE_SIZE - 1;
1228
1229 if (!PageUptodate(page)) {
1230 ret = btrfs_readpage(NULL, page);
1231 lock_page(page);
1232 if (page->mapping != mapping) {
1233 unlock_page(page);
1234 page_cache_release(page);
1235 goto again;
1236 }
1237 if (!PageUptodate(page)) {
1238 ret = -EIO;
1239 goto out;
1240 }
1241 }
1242 wait_on_page_writeback(page);
1243
1244 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
1245 set_page_extent_mapped(page);
1246
1247 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1248 if (ordered) {
1249 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
1250 unlock_page(page);
1251 page_cache_release(page);
1252 btrfs_wait_ordered_extent(inode, ordered);
1253 btrfs_put_ordered_extent(ordered);
1254 goto again;
1255 }
1256
1257 set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
1258 page_end, GFP_NOFS);
1259 ret = 0;
1260 if (offset != PAGE_CACHE_SIZE) {
1261 kaddr = kmap(page);
1262 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
1263 flush_dcache_page(page);
1264 kunmap(page);
1265 }
1266 set_page_dirty(page);
1267 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
1268
1269 unlock_page(page);
1270 page_cache_release(page);
1271 out:
1272 return ret;
1273 }
1274
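/*
 * setattr handler.  for truncates that grow the file, zero the tail of
 * the old last block and insert a hole extent to cover the newly exposed
 * range before handing the rest of the work to inode_setattr.
 */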
1275 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
1276 {
1277 struct inode *inode = dentry->d_inode;
1278 int err;
1279
1280 err = inode_change_ok(inode, attr);
1281 if (err)
1282 return err;
1283
1284 if (S_ISREG(inode->i_mode) &&
1285 attr->ia_valid & ATTR_SIZE && attr->ia_size > inode->i_size) {
1286 struct btrfs_trans_handle *trans;
1287 struct btrfs_root *root = BTRFS_I(inode)->root;
1288 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1289
1290 u64 mask = root->sectorsize - 1;
1291 u64 hole_start = (inode->i_size + mask) & ~mask;
1292 u64 block_end = (attr->ia_size + mask) & ~mask;
1293 u64 hole_size;
1294 u64 alloc_hint = 0;
1295
1296 if (attr->ia_size <= hole_start)
1297 goto out;
1298
1299 err = btrfs_check_free_space(root, 1, 0);
1300 if (err)
1301 goto fail;
1302
1303 btrfs_truncate_page(inode->i_mapping, inode->i_size);
1304
1305 hole_size = block_end - hole_start;
1306 btrfs_wait_ordered_range(inode, hole_start, hole_size);
1307 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1308
1309 trans = btrfs_start_transaction(root, 1);
1310 btrfs_set_trans_block_group(trans, inode);
1311 err = btrfs_drop_extents(trans, root, inode,
1312 hole_start, block_end, hole_start,
1313 &alloc_hint);
1314
1315 if (alloc_hint != EXTENT_MAP_INLINE) {
1316 err = btrfs_insert_file_extent(trans, root,
1317 inode->i_ino,
1318 hole_start, 0, 0,
1319 hole_size, 0);
1320 btrfs_drop_extent_cache(inode, hole_start,
1321 (u64)-1);
1322 btrfs_check_file(root, inode);
1323 }
1324 btrfs_end_transaction(trans, root);
1325 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
1326 if (err)
1327 return err;
1328 }
1329 out:
1330 err = inode_setattr(inode, attr);
1331 fail:
1332 return err;
1333 }
1334
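/*
 * called when the last reference to an unlinked inode is dropped: wait
 * for any ordered i/o, then remove every item belonging to the inode
 * from the btree.
 */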
1335 void btrfs_delete_inode(struct inode *inode)
1336 {
1337 struct btrfs_trans_handle *trans;
1338 struct btrfs_root *root = BTRFS_I(inode)->root;
1339 unsigned long nr;
1340 int ret;
1341
1342 btrfs_wait_ordered_range(inode, 0, (u64)-1);
1343 truncate_inode_pages(&inode->i_data, 0);
1344 if (is_bad_inode(inode)) {
1345 goto no_delete;
1346 }
1347
1348 inode->i_size = 0;
1349 trans = btrfs_start_transaction(root, 1);
1350
1351 btrfs_set_trans_block_group(trans, inode);
1352 ret = btrfs_truncate_in_trans(trans, root, inode, 0);
1353 if (ret)
1354 goto no_delete_lock;
1355
1356 nr = trans->blocks_used;
1357 clear_inode(inode);
1358
1359 btrfs_end_transaction(trans, root);
1360 btrfs_btree_balance_dirty(root, nr);
1361 return;
1362
1363 no_delete_lock:
1364 nr = trans->blocks_used;
1365 btrfs_end_transaction(trans, root);
1366 btrfs_btree_balance_dirty(root, nr);
1367 no_delete:
1368 clear_inode(inode);
1369 }
1370
1371 /*
1372 * this returns the key found in the dir entry in the location pointer.
1373 * If no dir entries were found, location->objectid is 0.
1374 */
1375 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
1376 struct btrfs_key *location)
1377 {
1378 const char *name = dentry->d_name.name;
1379 int namelen = dentry->d_name.len;
1380 struct btrfs_dir_item *di;
1381 struct btrfs_path *path;
1382 struct btrfs_root *root = BTRFS_I(dir)->root;
1383 int ret = 0;
1384
1385 if (namelen == 1 && strcmp(name, ".") == 0) {
1386 location->objectid = dir->i_ino;
1387 location->type = BTRFS_INODE_ITEM_KEY;
1388 location->offset = 0;
1389 return 0;
1390 }
1391 path = btrfs_alloc_path();
1392 BUG_ON(!path);
1393
1394 if (namelen == 2 && strcmp(name, "..") == 0) {
1395 struct btrfs_key key;
1396 struct extent_buffer *leaf;
1397 u32 nritems;
1398 int slot;
1399
1400 key.objectid = dir->i_ino;
1401 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1402 key.offset = 0;
1403 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1404 BUG_ON(ret == 0);
1405 ret = 0;
1406
1407 leaf = path->nodes[0];
1408 slot = path->slots[0];
1409 nritems = btrfs_header_nritems(leaf);
1410 if (slot >= nritems)
1411 goto out_err;
1412
1413 btrfs_item_key_to_cpu(leaf, &key, slot);
1414 if (key.objectid != dir->i_ino ||
1415 key.type != BTRFS_INODE_REF_KEY) {
1416 goto out_err;
1417 }
1418 location->objectid = key.offset;
1419 location->type = BTRFS_INODE_ITEM_KEY;
1420 location->offset = 0;
1421 goto out;
1422 }
1423
1424 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
1425 namelen, 0);
1426 if (IS_ERR(di))
1427 ret = PTR_ERR(di);
1428 if (!di || IS_ERR(di)) {
1429 goto out_err;
1430 }
1431 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
1432 out:
1433 btrfs_free_path(path);
1434 return ret;
1435 out_err:
1436 location->objectid = 0;
1437 goto out;
1438 }
1439
1440 /*
1441 * when we hit a tree root in a directory, the btrfs part of the inode
1442 * needs to be changed to reflect the root directory of the tree root. This
1443 * is kind of like crossing a mount point.
1444 */
1445 static int fixup_tree_root_location(struct btrfs_root *root,
1446 struct btrfs_key *location,
1447 struct btrfs_root **sub_root,
1448 struct dentry *dentry)
1449 {
1450 struct btrfs_path *path;
1451 struct btrfs_root_item *ri;
1452
1453 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
1454 return 0;
1455 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
1456 return 0;
1457
1458 path = btrfs_alloc_path();
1459 BUG_ON(!path);
1460
1461 *sub_root = btrfs_read_fs_root(root->fs_info, location,
1462 dentry->d_name.name,
1463 dentry->d_name.len);
1464 if (IS_ERR(*sub_root))
1465 return PTR_ERR(*sub_root);
1466
1467 ri = &(*sub_root)->root_item;
1468 location->objectid = btrfs_root_dirid(ri);
1469 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1470 location->offset = 0;
1471
1472 btrfs_free_path(path);
1473 return 0;
1474 }
1475
1476 static int btrfs_init_locked_inode(struct inode *inode, void *p)
1477 {
1478 struct btrfs_iget_args *args = p;
1479 inode->i_ino = args->ino;
1480 BTRFS_I(inode)->root = args->root;
1481 BTRFS_I(inode)->delalloc_bytes = 0;
1482 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1483 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1484 inode->i_mapping, GFP_NOFS);
1485 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1486 inode->i_mapping, GFP_NOFS);
1487 mutex_init(&BTRFS_I(inode)->csum_mutex);
1488 return 0;
1489 }
1490
1491 static int btrfs_find_actor(struct inode *inode, void *opaque)
1492 {
1493 struct btrfs_iget_args *args = opaque;
1494 return (args->ino == inode->i_ino &&
1495 args->root == BTRFS_I(inode)->root);
1496 }
1497
1498 struct inode *btrfs_ilookup(struct super_block *s, u64 objectid,
1499 u64 root_objectid)
1500 {
1501 struct btrfs_iget_args args;
1502 args.ino = objectid;
1503 args.root = btrfs_lookup_fs_root(btrfs_sb(s)->fs_info, root_objectid);
1504
1505 if (!args.root)
1506 return NULL;
1507
1508 return ilookup5(s, objectid, btrfs_find_actor, (void *)&args);
1509 }
1510
1511 struct inode *btrfs_iget_locked(struct super_block *s, u64 objectid,
1512 struct btrfs_root *root)
1513 {
1514 struct inode *inode;
1515 struct btrfs_iget_args args;
1516 args.ino = objectid;
1517 args.root = root;
1518
1519 inode = iget5_locked(s, objectid, btrfs_find_actor,
1520 btrfs_init_locked_inode,
1521 (void *)&args);
1522 return inode;
1523 }
1524
1525 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
1526 struct nameidata *nd)
1527 {
1528 struct inode * inode;
1529 struct btrfs_inode *bi = BTRFS_I(dir);
1530 struct btrfs_root *root = bi->root;
1531 struct btrfs_root *sub_root = root;
1532 struct btrfs_key location;
1533 int ret;
1534
1535 if (dentry->d_name.len > BTRFS_NAME_LEN)
1536 return ERR_PTR(-ENAMETOOLONG);
1537
1538 ret = btrfs_inode_by_name(dir, dentry, &location);
1539
1540 if (ret < 0)
1541 return ERR_PTR(ret);
1542
1543 inode = NULL;
1544 if (location.objectid) {
1545 ret = fixup_tree_root_location(root, &location, &sub_root,
1546 dentry);
1547 if (ret < 0)
1548 return ERR_PTR(ret);
1549 if (ret > 0)
1550 return ERR_PTR(-ENOENT);
1551 inode = btrfs_iget_locked(dir->i_sb, location.objectid,
1552 sub_root);
1553 if (!inode)
1554 return ERR_PTR(-EACCES);
1555 if (inode->i_state & I_NEW) {
1556 /* the inode and parent dir are two different roots */
1557 if (sub_root != root) {
1558 igrab(inode);
1559 sub_root->inode = inode;
1560 }
1561 BTRFS_I(inode)->root = sub_root;
1562 memcpy(&BTRFS_I(inode)->location, &location,
1563 sizeof(location));
1564 btrfs_read_locked_inode(inode);
1565 unlock_new_inode(inode);
1566 }
1567 }
1568 return d_splice_alias(inode, dentry);
1569 }
1570
1571 static unsigned char btrfs_filetype_table[] = {
1572 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
1573 };
1574
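/*
 * readdir walks the dir index items for this directory.  "." and ".."
 * are synthesized from the inode number and the inode back reference,
 * since they have no dir items of their own.
 */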
1575 static int btrfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
1576 {
1577 struct inode *inode = filp->f_dentry->d_inode;
1578 struct btrfs_root *root = BTRFS_I(inode)->root;
1579 struct btrfs_item *item;
1580 struct btrfs_dir_item *di;
1581 struct btrfs_key key;
1582 struct btrfs_key found_key;
1583 struct btrfs_path *path;
1584 int ret;
1585 u32 nritems;
1586 struct extent_buffer *leaf;
1587 int slot;
1588 int advance;
1589 unsigned char d_type;
1590 int over = 0;
1591 u32 di_cur;
1592 u32 di_total;
1593 u32 di_len;
1594 int key_type = BTRFS_DIR_INDEX_KEY;
1595 char tmp_name[32];
1596 char *name_ptr;
1597 int name_len;
1598
1599 /* FIXME, use a real flag for deciding about the key type */
1600 if (root->fs_info->tree_root == root)
1601 key_type = BTRFS_DIR_ITEM_KEY;
1602
1603 /* special case for "." */
1604 if (filp->f_pos == 0) {
1605 over = filldir(dirent, ".", 1,
1606 1, inode->i_ino,
1607 DT_DIR);
1608 if (over)
1609 return 0;
1610 filp->f_pos = 1;
1611 }
1612
1613 key.objectid = inode->i_ino;
1614 path = btrfs_alloc_path();
1615 path->reada = 2;
1616
1617 /* special case for .., just use the back ref */
1618 if (filp->f_pos == 1) {
1619 btrfs_set_key_type(&key, BTRFS_INODE_REF_KEY);
1620 key.offset = 0;
1621 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1622 BUG_ON(ret == 0);
1623 leaf = path->nodes[0];
1624 slot = path->slots[0];
1625 nritems = btrfs_header_nritems(leaf);
1626 if (slot >= nritems) {
1627 btrfs_release_path(root, path);
1628 goto read_dir_items;
1629 }
1630 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1631 btrfs_release_path(root, path);
1632 if (found_key.objectid != key.objectid ||
1633 found_key.type != BTRFS_INODE_REF_KEY)
1634 goto read_dir_items;
1635 over = filldir(dirent, "..", 2,
1636 2, found_key.offset, DT_DIR);
1637 if (over)
1638 goto nopos;
1639 filp->f_pos = 2;
1640 }
1641
1642 read_dir_items:
1643 btrfs_set_key_type(&key, key_type);
1644 key.offset = filp->f_pos;
1645
1646 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1647 if (ret < 0)
1648 goto err;
1649 advance = 0;
1650 while(1) {
1651 leaf = path->nodes[0];
1652 nritems = btrfs_header_nritems(leaf);
1653 slot = path->slots[0];
1654 if (advance || slot >= nritems) {
1655                         if (slot >= nritems - 1) {
1656 ret = btrfs_next_leaf(root, path);
1657 if (ret)
1658 break;
1659 leaf = path->nodes[0];
1660 nritems = btrfs_header_nritems(leaf);
1661 slot = path->slots[0];
1662 } else {
1663 slot++;
1664 path->slots[0]++;
1665 }
1666 }
1667 advance = 1;
1668 item = btrfs_item_nr(leaf, slot);
1669 btrfs_item_key_to_cpu(leaf, &found_key, slot);
1670
1671 if (found_key.objectid != key.objectid)
1672 break;
1673 if (btrfs_key_type(&found_key) != key_type)
1674 break;
1675 if (found_key.offset < filp->f_pos)
1676 continue;
1677
1678 filp->f_pos = found_key.offset;
1679 advance = 1;
1680 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
1681 di_cur = 0;
1682 di_total = btrfs_item_size(leaf, item);
1683 while(di_cur < di_total) {
1684 struct btrfs_key location;
1685
1686 name_len = btrfs_dir_name_len(leaf, di);
1687 if (name_len < 32) {
1688 name_ptr = tmp_name;
1689 } else {
1690 name_ptr = kmalloc(name_len, GFP_NOFS);
1691 BUG_ON(!name_ptr);
1692 }
1693 read_extent_buffer(leaf, name_ptr,
1694 (unsigned long)(di + 1), name_len);
1695
1696 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
1697 btrfs_dir_item_key_to_cpu(leaf, di, &location);
1698 over = filldir(dirent, name_ptr, name_len,
1699 found_key.offset,
1700 location.objectid,
1701 d_type);
1702
1703 if (name_ptr != tmp_name)
1704 kfree(name_ptr);
1705
1706 if (over)
1707 goto nopos;
1708 di_len = btrfs_dir_name_len(leaf, di) +
1709 btrfs_dir_data_len(leaf, di) +sizeof(*di);
1710 di_cur += di_len;
1711 di = (struct btrfs_dir_item *)((char *)di + di_len);
1712 }
1713 }
1714 if (key_type == BTRFS_DIR_INDEX_KEY)
1715 filp->f_pos = INT_LIMIT(typeof(filp->f_pos));
1716 else
1717 filp->f_pos++;
1718 nopos:
1719 ret = 0;
1720 err:
1721 btrfs_free_path(path);
1722 return ret;
1723 }
1724
1725 int btrfs_write_inode(struct inode *inode, int wait)
1726 {
1727 struct btrfs_root *root = BTRFS_I(inode)->root;
1728 struct btrfs_trans_handle *trans;
1729 int ret = 0;
1730
1731 if (wait) {
1732 trans = btrfs_start_transaction(root, 1);
1733 btrfs_set_trans_block_group(trans, inode);
1734 ret = btrfs_commit_transaction(trans, root);
1735 }
1736 return ret;
1737 }
1738
1739 /*
1740 * This is somewhat expensive, updating the tree every time the
1741 * inode changes. But, it is most likely to find the inode in cache.
1742 * FIXME, needs more benchmarking...there are no reasons other than performance
1743 * to keep or drop this code.
1744 */
1745 void btrfs_dirty_inode(struct inode *inode)
1746 {
1747 struct btrfs_root *root = BTRFS_I(inode)->root;
1748 struct btrfs_trans_handle *trans;
1749
1750 trans = btrfs_start_transaction(root, 1);
1751 btrfs_set_trans_block_group(trans, inode);
1752 btrfs_update_inode(trans, root, inode);
1753 btrfs_end_transaction(trans, root);
1754 }
1755
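/*
 * create a new in-memory inode and insert the matching inode item and
 * inode back reference into the btree with a single
 * btrfs_insert_empty_items call.
 */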
1756 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
1757 struct btrfs_root *root,
1758 const char *name, int name_len,
1759 u64 ref_objectid,
1760 u64 objectid,
1761 struct btrfs_block_group_cache *group,
1762 int mode)
1763 {
1764 struct inode *inode;
1765 struct btrfs_inode_item *inode_item;
1766 struct btrfs_block_group_cache *new_inode_group;
1767 struct btrfs_key *location;
1768 struct btrfs_path *path;
1769 struct btrfs_inode_ref *ref;
1770 struct btrfs_key key[2];
1771 u32 sizes[2];
1772 unsigned long ptr;
1773 int ret;
1774 int owner;
1775
1776 path = btrfs_alloc_path();
1777 BUG_ON(!path);
1778
1779 inode = new_inode(root->fs_info->sb);
1780 if (!inode)
1781 return ERR_PTR(-ENOMEM);
1782
1783 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
1784 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
1785 inode->i_mapping, GFP_NOFS);
1786 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
1787 inode->i_mapping, GFP_NOFS);
1788 mutex_init(&BTRFS_I(inode)->csum_mutex);
1789 BTRFS_I(inode)->delalloc_bytes = 0;
1790 BTRFS_I(inode)->root = root;
1791
1792 if (mode & S_IFDIR)
1793 owner = 0;
1794 else
1795 owner = 1;
1796 new_inode_group = btrfs_find_block_group(root, group, 0,
1797 BTRFS_BLOCK_GROUP_METADATA, owner);
1798 if (!new_inode_group) {
1799 printk("find_block group failed\n");
1800 new_inode_group = group;
1801 }
1802 BTRFS_I(inode)->block_group = new_inode_group;
1803 BTRFS_I(inode)->flags = 0;
1804
1805 key[0].objectid = objectid;
1806 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
1807 key[0].offset = 0;
1808
1809 key[1].objectid = objectid;
1810 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
1811 key[1].offset = ref_objectid;
1812
1813 sizes[0] = sizeof(struct btrfs_inode_item);
1814 sizes[1] = name_len + sizeof(*ref);
1815
1816 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
1817 if (ret != 0)
1818 goto fail;
1819
1820 if (objectid > root->highest_inode)
1821 root->highest_inode = objectid;
1822
1823 inode->i_uid = current->fsuid;
1824 inode->i_gid = current->fsgid;
1825 inode->i_mode = mode;
1826 inode->i_ino = objectid;
1827 inode->i_blocks = 0;
1828 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
1829 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
1830 struct btrfs_inode_item);
1831 fill_inode_item(path->nodes[0], inode_item, inode);
1832
1833 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
1834 struct btrfs_inode_ref);
1835 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
1836 ptr = (unsigned long)(ref + 1);
1837 write_extent_buffer(path->nodes[0], name, ptr, name_len);
1838
1839 btrfs_mark_buffer_dirty(path->nodes[0]);
1840 btrfs_free_path(path);
1841
1842 location = &BTRFS_I(inode)->location;
1843 location->objectid = objectid;
1844 location->offset = 0;
1845 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
1846
1847 insert_inode_hash(inode);
1848 return inode;
1849 fail:
1850 btrfs_free_path(path);
1851 return ERR_PTR(ret);
1852 }
1853
1854 static inline u8 btrfs_inode_type(struct inode *inode)
1855 {
1856 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
1857 }
1858
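/*
 * add a dir item (and optionally an inode back reference) linking the
 * inode into its parent directory, and bump the directory size and
 * timestamps to match.
 */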
1859 static int btrfs_add_link(struct btrfs_trans_handle *trans,
1860 struct dentry *dentry, struct inode *inode,
1861 int add_backref)
1862 {
1863 int ret;
1864 struct btrfs_key key;
1865 struct btrfs_root *root = BTRFS_I(dentry->d_parent->d_inode)->root;
1866 struct inode *parent_inode;
1867
1868 key.objectid = inode->i_ino;
1869 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
1870 key.offset = 0;
1871
1872 ret = btrfs_insert_dir_item(trans, root,
1873 dentry->d_name.name, dentry->d_name.len,
1874 dentry->d_parent->d_inode->i_ino,
1875 &key, btrfs_inode_type(inode));
1876 if (ret == 0) {
1877 if (add_backref) {
1878 ret = btrfs_insert_inode_ref(trans, root,
1879 dentry->d_name.name,
1880 dentry->d_name.len,
1881 inode->i_ino,
1882 dentry->d_parent->d_inode->i_ino);
1883 }
1884 parent_inode = dentry->d_parent->d_inode;
1885 parent_inode->i_size += dentry->d_name.len * 2;
1886 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
1887 ret = btrfs_update_inode(trans, root,
1888 dentry->d_parent->d_inode);
1889 }
1890 return ret;
1891 }
1892
1893 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
1894 struct dentry *dentry, struct inode *inode,
1895 int backref)
1896 {
1897 int err = btrfs_add_link(trans, dentry, inode, backref);
1898 if (!err) {
1899 d_instantiate(dentry, inode);
1900 return 0;
1901 }
1902 if (err > 0)
1903 err = -EEXIST;
1904 return err;
1905 }
1906
1907 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
1908 int mode, dev_t rdev)
1909 {
1910 struct btrfs_trans_handle *trans;
1911 struct btrfs_root *root = BTRFS_I(dir)->root;
1912 struct inode *inode = NULL;
1913 int err;
1914 int drop_inode = 0;
1915 u64 objectid;
1916 unsigned long nr = 0;
1917
1918 if (!new_valid_dev(rdev))
1919 return -EINVAL;
1920
1921 err = btrfs_check_free_space(root, 1, 0);
1922 if (err)
1923 goto fail;
1924
1925 trans = btrfs_start_transaction(root, 1);
1926 btrfs_set_trans_block_group(trans, dir);
1927
1928 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1929 if (err) {
1930 err = -ENOSPC;
1931 goto out_unlock;
1932 }
1933
1934 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1935 dentry->d_name.len,
1936 dentry->d_parent->d_inode->i_ino, objectid,
1937 BTRFS_I(dir)->block_group, mode);
1938 err = PTR_ERR(inode);
1939 if (IS_ERR(inode))
1940 goto out_unlock;
1941
1942 btrfs_set_trans_block_group(trans, inode);
1943 err = btrfs_add_nondir(trans, dentry, inode, 0);
1944 if (err)
1945 drop_inode = 1;
1946 else {
1947 inode->i_op = &btrfs_special_inode_operations;
1948 init_special_inode(inode, inode->i_mode, rdev);
1949 btrfs_update_inode(trans, root, inode);
1950 }
1951 dir->i_sb->s_dirt = 1;
1952 btrfs_update_inode_block_group(trans, inode);
1953 btrfs_update_inode_block_group(trans, dir);
1954 out_unlock:
1955 nr = trans->blocks_used;
1956 btrfs_end_transaction_throttle(trans, root);
1957 fail:
1958 if (drop_inode) {
1959 inode_dec_link_count(inode);
1960 iput(inode);
1961 }
1962 btrfs_btree_balance_dirty(root, nr);
1963 return err;
1964 }
1965
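/*
 * create a regular file.  This mirrors btrfs_mknod, but sets up the
 * regular file operations and the per-inode extent map and io trees
 * instead of a special inode.
 */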
1966 static int btrfs_create(struct inode *dir, struct dentry *dentry,
1967 int mode, struct nameidata *nd)
1968 {
1969 struct btrfs_trans_handle *trans;
1970 struct btrfs_root *root = BTRFS_I(dir)->root;
1971 struct inode *inode = NULL;
1972 int err;
1973 int drop_inode = 0;
1974 unsigned long nr = 0;
1975 u64 objectid;
1976
1977 err = btrfs_check_free_space(root, 1, 0);
1978 if (err)
1979 goto fail;
1980 trans = btrfs_start_transaction(root, 1);
1981 btrfs_set_trans_block_group(trans, dir);
1982
1983 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
1984 if (err) {
1985 err = -ENOSPC;
1986 goto out_unlock;
1987 }
1988
1989 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
1990 dentry->d_name.len,
1991 dentry->d_parent->d_inode->i_ino,
1992 objectid, BTRFS_I(dir)->block_group, mode);
1993 err = PTR_ERR(inode);
1994 if (IS_ERR(inode))
1995 goto out_unlock;
1996
1997 btrfs_set_trans_block_group(trans, inode);
1998 err = btrfs_add_nondir(trans, dentry, inode, 0);
1999 if (err)
2000 drop_inode = 1;
2001 else {
2002 inode->i_mapping->a_ops = &btrfs_aops;
2003 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2004 inode->i_fop = &btrfs_file_operations;
2005 inode->i_op = &btrfs_file_inode_operations;
2006 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2007 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2008 inode->i_mapping, GFP_NOFS);
2009 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
2010 inode->i_mapping, GFP_NOFS);
2011 mutex_init(&BTRFS_I(inode)->csum_mutex);
2012 BTRFS_I(inode)->delalloc_bytes = 0;
2013 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2014 }
2015 dir->i_sb->s_dirt = 1;
2016 btrfs_update_inode_block_group(trans, inode);
2017 btrfs_update_inode_block_group(trans, dir);
2018 out_unlock:
2019 nr = trans->blocks_used;
2020 btrfs_end_transaction_throttle(trans, root);
2021 fail:
2022 if (drop_inode) {
2023 inode_dec_link_count(inode);
2024 iput(inode);
2025 }
2026 btrfs_btree_balance_dirty(root, nr);
2027 return err;
2028 }
2029
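/*
 * add a hard link to an existing inode: bump the link count, insert a new
 * directory entry plus inode backref, and update the inode item.  The
 * extra reference is dropped in the fail path if anything goes wrong.
 */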
2030 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
2031 struct dentry *dentry)
2032 {
2033 struct btrfs_trans_handle *trans;
2034 struct btrfs_root *root = BTRFS_I(dir)->root;
2035 struct inode *inode = old_dentry->d_inode;
2036 unsigned long nr = 0;
2037 int err;
2038 int drop_inode = 0;
2039
2040 if (inode->i_nlink == 0)
2041 return -ENOENT;
2042
2043 err = btrfs_check_free_space(root, 1, 0);
2044 if (err)
2045 goto fail;
2046 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
2047 inode->i_nlink++;
2048 #else
2049 inc_nlink(inode);
2050 #endif
2051 trans = btrfs_start_transaction(root, 1);
2052
2053 btrfs_set_trans_block_group(trans, dir);
2054 atomic_inc(&inode->i_count);
2055 err = btrfs_add_nondir(trans, dentry, inode, 1);
2056
2057 if (err)
2058 drop_inode = 1;
2059
2060 dir->i_sb->s_dirt = 1;
2061 btrfs_update_inode_block_group(trans, dir);
2062 err = btrfs_update_inode(trans, root, inode);
2063
2064 if (err)
2065 drop_inode = 1;
2066
2067 nr = trans->blocks_used;
2068 btrfs_end_transaction_throttle(trans, root);
2069 fail:
2070 if (drop_inode) {
2071 inode_dec_link_count(inode);
2072 iput(inode);
2073 }
2074 btrfs_btree_balance_dirty(root, nr);
2075 return err;
2076 }
2077
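/*
 * create a new, empty directory and link it into its parent.  drop_on_err
 * makes sure a partially constructed directory inode is released if any
 * step fails.
 */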
2078 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
2079 {
2080 struct inode *inode = NULL;
2081 struct btrfs_trans_handle *trans;
2082 struct btrfs_root *root = BTRFS_I(dir)->root;
2083 int err = 0;
2084 int drop_on_err = 0;
2085 u64 objectid = 0;
2086 unsigned long nr = 1;
2087
2088 err = btrfs_check_free_space(root, 1, 0);
2089 if (err)
2090 goto out_unlock;
2091
2092 trans = btrfs_start_transaction(root, 1);
2093 if (IS_ERR(trans)) {
2094 err = PTR_ERR(trans);
2095 goto out_unlock;
2096 }
2097 
2098 btrfs_set_trans_block_group(trans, dir);
2099
2100 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
2101 if (err) {
2102 err = -ENOSPC;
2103 goto out_unlock;
2104 }
2105
2106 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
2107 dentry->d_name.len,
2108 dentry->d_parent->d_inode->i_ino, objectid,
2109 BTRFS_I(dir)->block_group, S_IFDIR | mode);
2110 if (IS_ERR(inode)) {
2111 err = PTR_ERR(inode);
2112 goto out_fail;
2113 }
2114
2115 drop_on_err = 1;
2116 inode->i_op = &btrfs_dir_inode_operations;
2117 inode->i_fop = &btrfs_dir_file_operations;
2118 btrfs_set_trans_block_group(trans, inode);
2119
2120 inode->i_size = 0;
2121 err = btrfs_update_inode(trans, root, inode);
2122 if (err)
2123 goto out_fail;
2124
2125 err = btrfs_add_link(trans, dentry, inode, 0);
2126 if (err)
2127 goto out_fail;
2128
2129 d_instantiate(dentry, inode);
2130 drop_on_err = 0;
2131 dir->i_sb->s_dirt = 1;
2132 btrfs_update_inode_block_group(trans, inode);
2133 btrfs_update_inode_block_group(trans, dir);
2134
2135 out_fail:
2136 nr = trans->blocks_used;
2137 btrfs_end_transaction_throttle(trans, root);
2138
2139 out_unlock:
2140 if (drop_on_err)
2141 iput(inode);
2142 btrfs_btree_balance_dirty(root, nr);
2143 return err;
2144 }
2145
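/*
 * helper for btrfs_get_extent: when an overlapping mapping already exists
 * in the extent map tree, trim the new map so it starts at map_start and
 * try the insertion again.
 */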
2146 static int merge_extent_mapping(struct extent_map_tree *em_tree,
2147 struct extent_map *existing,
2148 struct extent_map *em,
2149 u64 map_start, u64 map_len)
2150 {
2151 u64 start_diff;
2152
2153 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
2154 start_diff = map_start - em->start;
2155 em->start = map_start;
2156 em->len = map_len;
2157 if (em->block_start < EXTENT_MAP_LAST_BYTE)
2158 em->block_start += start_diff;
2159 return add_extent_mapping(em_tree, em);
2160 }
2161
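/*
 * btrfs_get_extent maps a byte range of a file to an extent_map.  The
 * in-memory extent map tree is consulted first; on a miss the file extent
 * item is read from the btree and turned into a mapping, covering regular
 * extents, holes and inline extents (whose data may be copied straight
 * into the page here).
 */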
2162 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
2163 size_t pg_offset, u64 start, u64 len,
2164 int create)
2165 {
2166 int ret;
2167 int err = 0;
2168 u64 bytenr;
2169 u64 extent_start = 0;
2170 u64 extent_end = 0;
2171 u64 objectid = inode->i_ino;
2172 u32 found_type;
2173 struct btrfs_path *path;
2174 struct btrfs_root *root = BTRFS_I(inode)->root;
2175 struct btrfs_file_extent_item *item;
2176 struct extent_buffer *leaf;
2177 struct btrfs_key found_key;
2178 struct extent_map *em = NULL;
2179 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
2180 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2181 struct btrfs_trans_handle *trans = NULL;
2182
2183 path = btrfs_alloc_path();
2184 BUG_ON(!path);
2185
2186 again:
2187 spin_lock(&em_tree->lock);
2188 em = lookup_extent_mapping(em_tree, start, len);
2189 if (em)
2190 em->bdev = root->fs_info->fs_devices->latest_bdev;
2191 spin_unlock(&em_tree->lock);
2192
2193 if (em) {
2194 if (em->start > start || em->start + em->len <= start)
2195 free_extent_map(em);
2196 else if (em->block_start == EXTENT_MAP_INLINE && page)
2197 free_extent_map(em);
2198 else
2199 goto out;
2200 }
2201 em = alloc_extent_map(GFP_NOFS);
2202 if (!em) {
2203 err = -ENOMEM;
2204 goto out;
2205 }
2206 em->bdev = root->fs_info->fs_devices->latest_bdev;
2207 em->start = EXTENT_MAP_HOLE;
2208 em->len = (u64)-1;
2209 ret = btrfs_lookup_file_extent(trans, root, path,
2210 objectid, start, trans != NULL);
2211 if (ret < 0) {
2212 err = ret;
2213 goto out;
2214 }
2215
2216 if (ret != 0) {
2217 if (path->slots[0] == 0)
2218 goto not_found;
2219 path->slots[0]--;
2220 }
2221
2222 leaf = path->nodes[0];
2223 item = btrfs_item_ptr(leaf, path->slots[0],
2224 struct btrfs_file_extent_item);
2225 /* are we inside the extent that was found? */
2226 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2227 found_type = btrfs_key_type(&found_key);
2228 if (found_key.objectid != objectid ||
2229 found_type != BTRFS_EXTENT_DATA_KEY) {
2230 goto not_found;
2231 }
2232
2233 found_type = btrfs_file_extent_type(leaf, item);
2234 extent_start = found_key.offset;
2235 if (found_type == BTRFS_FILE_EXTENT_REG) {
2236 extent_end = extent_start +
2237 btrfs_file_extent_num_bytes(leaf, item);
2238 err = 0;
2239 if (start < extent_start || start >= extent_end) {
2240 em->start = start;
2241 if (start < extent_start) {
2242 if (start + len <= extent_start)
2243 goto not_found;
2244 em->len = extent_end - extent_start;
2245 } else {
2246 em->len = len;
2247 }
2248 goto not_found_em;
2249 }
2250 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
2251 if (bytenr == 0) {
2252 em->start = extent_start;
2253 em->len = extent_end - extent_start;
2254 em->block_start = EXTENT_MAP_HOLE;
2255 goto insert;
2256 }
2257 bytenr += btrfs_file_extent_offset(leaf, item);
2258 em->block_start = bytenr;
2259 em->start = extent_start;
2260 em->len = extent_end - extent_start;
2261 goto insert;
2262 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
2263 u64 page_start;
2264 unsigned long ptr;
2265 char *map;
2266 size_t size;
2267 size_t extent_offset;
2268 size_t copy_size;
2269
2270 size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
2271 path->slots[0]));
2272 extent_end = (extent_start + size + root->sectorsize - 1) &
2273 ~((u64)root->sectorsize - 1);
2274 if (start < extent_start || start >= extent_end) {
2275 em->start = start;
2276 if (start < extent_start) {
2277 if (start + len <= extent_start)
2278 goto not_found;
2279 em->len = extent_end - extent_start;
2280 } else {
2281 em->len = len;
2282 }
2283 goto not_found_em;
2284 }
2285 em->block_start = EXTENT_MAP_INLINE;
2286
2287 if (!page) {
2288 em->start = extent_start;
2289 em->len = size;
2290 goto out;
2291 }
2292
2293 page_start = page_offset(page) + pg_offset;
2294 extent_offset = page_start - extent_start;
2295 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
2296 size - extent_offset);
2297 em->start = extent_start + extent_offset;
2298 em->len = (copy_size + root->sectorsize - 1) &
2299 ~((u64)root->sectorsize - 1);
2300 map = kmap(page);
2301 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
2302 if (create == 0 && !PageUptodate(page)) {
2303 read_extent_buffer(leaf, map + pg_offset, ptr,
2304 copy_size);
2305 flush_dcache_page(page);
2306 } else if (create && PageUptodate(page)) {
2307 if (!trans) {
2308 kunmap(page);
2309 free_extent_map(em);
2310 em = NULL;
2311 btrfs_release_path(root, path);
2312 trans = btrfs_start_transaction(root, 1);
2313 goto again;
2314 }
2315 write_extent_buffer(leaf, map + pg_offset, ptr,
2316 copy_size);
2317 btrfs_mark_buffer_dirty(leaf);
2318 }
2319 kunmap(page);
2320 set_extent_uptodate(io_tree, em->start,
2321 extent_map_end(em) - 1, GFP_NOFS);
2322 goto insert;
2323 } else {
2324 printk("unknown found_type %d\n", found_type);
2325 WARN_ON(1);
2326 }
2327 not_found:
2328 em->start = start;
2329 em->len = len;
2330 not_found_em:
2331 em->block_start = EXTENT_MAP_HOLE;
2332 insert:
2333 btrfs_release_path(root, path);
2334 if (em->start > start || extent_map_end(em) <= start) {
2335 printk("bad extent! em: [%Lu %Lu] passed [%Lu %Lu]\n", em->start, em->len, start, len);
2336 err = -EIO;
2337 goto out;
2338 }
2339
2340 err = 0;
2341 spin_lock(&em_tree->lock);
2342 ret = add_extent_mapping(em_tree, em);
2343 /* it is possible that someone inserted the extent into the tree
2344 * while we had the lock dropped. It is also possible that
2345 * an overlapping map exists in the tree
2346 */
2347 if (ret == -EEXIST) {
2348 struct extent_map *existing;
2349
2350 ret = 0;
2351
2352 existing = lookup_extent_mapping(em_tree, start, len);
2353 if (existing && (existing->start > start ||
2354 existing->start + existing->len <= start)) {
2355 free_extent_map(existing);
2356 existing = NULL;
2357 }
2358 if (!existing) {
2359 existing = lookup_extent_mapping(em_tree, em->start,
2360 em->len);
2361 if (existing) {
2362 err = merge_extent_mapping(em_tree, existing,
2363 em, start,
2364 root->sectorsize);
2365 free_extent_map(existing);
2366 if (err) {
2367 free_extent_map(em);
2368 em = NULL;
2369 }
2370 } else {
2371 err = -EIO;
2372 printk("failed to insert %Lu %Lu\n",
2373 start, len);
2374 free_extent_map(em);
2375 em = NULL;
2376 }
2377 } else {
2378 free_extent_map(em);
2379 em = existing;
2380 err = 0;
2381 }
2382 }
2383 spin_unlock(&em_tree->lock);
2384 out:
2385 btrfs_free_path(path);
2386 if (trans) {
2387 ret = btrfs_end_transaction(trans, root);
2388 if (!err) {
2389 err = ret;
2390 }
2391 }
2392 if (err) {
2393 free_extent_map(em);
2394 WARN_ON(1);
2395 return ERR_PTR(err);
2396 }
2397 return em;
2398 }
2399
2400 #if 0 /* waiting for O_DIRECT reads */
2401 static int btrfs_get_block(struct inode *inode, sector_t iblock,
2402 struct buffer_head *bh_result, int create)
2403 {
2404 struct extent_map *em;
2405 u64 start = (u64)iblock << inode->i_blkbits;
2406 struct btrfs_multi_bio *multi = NULL;
2407 struct btrfs_root *root = BTRFS_I(inode)->root;
2408 u64 len;
2409 u64 logical;
2410 u64 map_length;
2411 int ret = 0;
2412
2413 em = btrfs_get_extent(inode, NULL, 0, start, bh_result->b_size, 0);
2414
2415 if (!em || IS_ERR(em))
2416 goto out;
2417
2418 if (em->start > start || em->start + em->len <= start) {
2419 goto out;
2420 }
2421
2422 if (em->block_start == EXTENT_MAP_INLINE) {
2423 ret = -EINVAL;
2424 goto out;
2425 }
2426
2427 len = em->start + em->len - start;
2428 len = min_t(u64, len, INT_LIMIT(typeof(bh_result->b_size)));
2429
2430 if (em->block_start == EXTENT_MAP_HOLE ||
2431 em->block_start == EXTENT_MAP_DELALLOC) {
2432 bh_result->b_size = len;
2433 goto out;
2434 }
2435
2436 logical = start - em->start;
2437 logical = em->block_start + logical;
2438
2439 map_length = len;
2440 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
2441 logical, &map_length, &multi, 0);
2442 BUG_ON(ret);
2443 bh_result->b_blocknr = multi->stripes[0].physical >> inode->i_blkbits;
2444 bh_result->b_size = min(map_length, len);
2445
2446 bh_result->b_bdev = multi->stripes[0].dev->bdev;
2447 set_buffer_mapped(bh_result);
2448 kfree(multi);
2449 out:
2450 free_extent_map(em);
2451 return ret;
2452 }
2453 #endif
2454
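/* O_DIRECT is not supported yet, so this address space op just returns -EINVAL */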
2455 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
2456 const struct iovec *iov, loff_t offset,
2457 unsigned long nr_segs)
2458 {
2459 return -EINVAL;
2460 #if 0
2461 struct file *file = iocb->ki_filp;
2462 struct inode *inode = file->f_mapping->host;
2463
2464 if (rw == WRITE)
2465 return -EINVAL;
2466
2467 return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
2468 offset, nr_segs, btrfs_get_block, NULL);
2469 #endif
2470 }
2471
2472 static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
2473 {
2474 return extent_bmap(mapping, iblock, btrfs_get_extent);
2475 }
2476
2477 int btrfs_readpage(struct file *file, struct page *page)
2478 {
2479 struct extent_io_tree *tree;
2480 tree = &BTRFS_I(page->mapping->host)->io_tree;
2481 return extent_read_full_page(tree, page, btrfs_get_extent);
2482 }
2483
2484 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
2485 {
2486 struct extent_io_tree *tree;
2487
2488
2489 if (current->flags & PF_MEMALLOC) {
2490 redirty_page_for_writepage(wbc, page);
2491 unlock_page(page);
2492 return 0;
2493 }
2494 tree = &BTRFS_I(page->mapping->host)->io_tree;
2495 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
2496 }
2497
2498 static int btrfs_writepages(struct address_space *mapping,
2499 struct writeback_control *wbc)
2500 {
2501 struct extent_io_tree *tree;
2502 tree = &BTRFS_I(mapping->host)->io_tree;
2503 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
2504 }
2505
2506 static int
2507 btrfs_readpages(struct file *file, struct address_space *mapping,
2508 struct list_head *pages, unsigned nr_pages)
2509 {
2510 struct extent_io_tree *tree;
2511 tree = &BTRFS_I(mapping->host)->io_tree;
2512 return extent_readpages(tree, mapping, pages, nr_pages,
2513 btrfs_get_extent);
2514 }
2515 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
2516 {
2517 struct extent_io_tree *tree;
2518 struct extent_map_tree *map;
2519 int ret;
2520
2521 tree = &BTRFS_I(page->mapping->host)->io_tree;
2522 map = &BTRFS_I(page->mapping->host)->extent_tree;
2523 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
2524 if (ret == 1) {
2525 invalidate_extent_lru(tree, page_offset(page), PAGE_CACHE_SIZE);
2526 ClearPagePrivate(page);
2527 set_page_private(page, 0);
2528 page_cache_release(page);
2529 }
2530 return ret;
2531 }
2532
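/*
 * refuse to release a page that still belongs to a pending ordered
 * extent; everything else goes through __btrfs_releasepage to drop the
 * cached extent state.
 */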
2533 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
2534 {
2535 struct btrfs_ordered_extent *ordered;
2536
2537 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
2538 page_offset(page));
2539 if (ordered) {
2540 btrfs_put_ordered_extent(ordered);
2541 return 0;
2542 }
2543 return __btrfs_releasepage(page, gfp_flags);
2544 }
2545
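/*
 * invalidate a page that is being truncated or removed.  Any pending
 * ordered extent covering the page is finished off through the writepage
 * end_io hook, then the extent state bits for the range are cleared and
 * the page's private data is released.
 */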
2546 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
2547 {
2548 struct extent_io_tree *tree;
2549 struct btrfs_ordered_extent *ordered;
2550 u64 page_start = page_offset(page);
2551 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2552
2553 wait_on_page_writeback(page);
2554 tree = &BTRFS_I(page->mapping->host)->io_tree;
2555 if (offset) {
2556 btrfs_releasepage(page, GFP_NOFS);
2557 return;
2558 }
2559
2560 lock_extent(tree, page_start, page_end, GFP_NOFS);
2561 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
2562 page_offset(page));
2563 if (ordered) {
2564 clear_extent_bit(tree, page_start, page_end,
2565 EXTENT_DIRTY | EXTENT_DELALLOC |
2566 EXTENT_LOCKED, 1, 0, GFP_NOFS);
2567 btrfs_writepage_end_io_hook(page, page_start,
2568 page_end, NULL, 1);
2569 btrfs_put_ordered_extent(ordered);
2570 lock_extent(tree, page_start, page_end, GFP_NOFS);
2571 }
2572 clear_extent_bit(tree, page_start, page_end,
2573 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC |
2574 EXTENT_ORDERED,
2575 1, 1, GFP_NOFS);
2576 __btrfs_releasepage(page, GFP_NOFS);
2577
2578 if (PagePrivate(page)) {
2579 invalidate_extent_lru(tree, page_offset(page),
2580 PAGE_CACHE_SIZE);
2581 ClearPagePrivate(page);
2582 set_page_private(page, 0);
2583 page_cache_release(page);
2584 }
2585 }
2586
2587 /*
2588 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
2589 * called from a page fault handler when a page is first dirtied. Hence we must
2590 * be careful to check for EOF conditions here. We set the page up correctly
2591 * for a written page which means we get ENOSPC checking when writing into
2592 * holes and correct delalloc and unwritten extent mapping on filesystems that
2593 * support these features.
2594 *
2595 * We are not allowed to take the i_mutex here so we have to play games to
2596 * protect against truncate races as the page could now be beyond EOF. Because
2597 * vmtruncate() writes the inode size before removing pages, once we have the
2598 * page lock we can determine safely if the page is beyond EOF. If it is not
2599 * beyond EOF, then the page is guaranteed safe against truncation until we
2600 * unlock the page.
2601 */
2602 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
2603 {
2604 struct inode *inode = fdentry(vma->vm_file)->d_inode;
2605 struct btrfs_root *root = BTRFS_I(inode)->root;
2606 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2607 struct btrfs_ordered_extent *ordered;
2608 char *kaddr;
2609 unsigned long zero_start;
2610 loff_t size;
2611 int ret;
2612 u64 page_start;
2613 u64 page_end;
2614
2615 ret = btrfs_check_free_space(root, PAGE_CACHE_SIZE, 0);
2616 if (ret)
2617 goto out;
2618
2619 ret = -EINVAL;
2620 again:
2621 lock_page(page);
2622 size = i_size_read(inode);
2623 page_start = page_offset(page);
2624 page_end = page_start + PAGE_CACHE_SIZE - 1;
2625
2626 if ((page->mapping != inode->i_mapping) ||
2627 (page_start >= size)) {
2628 /* page got truncated out from underneath us */
2629 goto out_unlock;
2630 }
2631 wait_on_page_writeback(page);
2632
2633 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2634 set_page_extent_mapped(page);
2635
2636 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2637 if (ordered) {
2638 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2639 unlock_page(page);
2640 btrfs_wait_ordered_extent(inode, ordered);
2641 btrfs_put_ordered_extent(ordered);
2642 goto again;
2643 }
2644
2645 set_extent_delalloc(&BTRFS_I(inode)->io_tree, page_start,
2646 page_end, GFP_NOFS);
2647 ret = 0;
2648
2649 /* page is wholly or partially inside EOF */
2650 if (page_start + PAGE_CACHE_SIZE > size)
2651 zero_start = size & ~PAGE_CACHE_MASK;
2652 else
2653 zero_start = PAGE_CACHE_SIZE;
2654
2655 if (zero_start != PAGE_CACHE_SIZE) {
2656 kaddr = kmap(page);
2657 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
2658 flush_dcache_page(page);
2659 kunmap(page);
2660 }
2661 set_page_dirty(page);
2662 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2663
2664 out_unlock:
2665 unlock_page(page);
2666 out:
2667 return ret;
2668 }
2669
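/*
 * the ->truncate inode operation: i_size has already been updated by the
 * VFS, so zero the tail of the last page and drop the file extents beyond
 * the new size inside a single transaction.
 */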
2670 static void btrfs_truncate(struct inode *inode)
2671 {
2672 struct btrfs_root *root = BTRFS_I(inode)->root;
2673 int ret;
2674 struct btrfs_trans_handle *trans;
2675 unsigned long nr;
2676
2677 if (!S_ISREG(inode->i_mode))
2678 return;
2679 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
2680 return;
2681
2682 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2683
2684 trans = btrfs_start_transaction(root, 1);
2685 btrfs_set_trans_block_group(trans, inode);
2686
2687 /* FIXME, add redo link to tree so we don't leak on crash */
2688 ret = btrfs_truncate_in_trans(trans, root, inode,
2689 BTRFS_EXTENT_DATA_KEY);
2690 btrfs_update_inode(trans, root, inode);
2691 nr = trans->blocks_used;
2692
2693 ret = btrfs_end_transaction_throttle(trans, root);
2694 BUG_ON(ret);
2695 btrfs_btree_balance_dirty(root, nr);
2696 }
2697
2698 /*
2699 * Invalidate a single dcache entry at the root of the filesystem.
2700 * Needed after creation of snapshot or subvolume.
2701 */
2702 void btrfs_invalidate_dcache_root(struct btrfs_root *root, char *name,
2703 int namelen)
2704 {
2705 struct dentry *alias, *entry;
2706 struct qstr qstr;
2707
2708 alias = d_find_alias(root->fs_info->sb->s_root->d_inode);
2709 if (alias) {
2710 qstr.name = name;
2711 qstr.len = namelen;
2712 /* change me if btrfs ever gets a d_hash operation */
2713 qstr.hash = full_name_hash(qstr.name, qstr.len);
2714 entry = d_lookup(alias, &qstr);
2715 dput(alias);
2716 if (entry) {
2717 d_invalidate(entry);
2718 dput(entry);
2719 }
2720 }
2721 }
2722
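/*
 * create the first inode of a brand new subvolume.  It is a directory
 * named ".." that references itself as parent, since the new tree has no
 * parent directory of its own yet.
 */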
2723 int btrfs_create_subvol_root(struct btrfs_root *new_root,
2724 struct btrfs_trans_handle *trans, u64 new_dirid,
2725 struct btrfs_block_group_cache *block_group)
2726 {
2727 struct inode *inode;
2728 int ret;
2729
2730 inode = btrfs_new_inode(trans, new_root, "..", 2, new_dirid,
2731 new_dirid, block_group, S_IFDIR | 0700);
2732 if (IS_ERR(inode))
2733 return PTR_ERR(inode);
2734 inode->i_op = &btrfs_dir_inode_operations;
2735 inode->i_fop = &btrfs_dir_file_operations;
2736 new_root->inode = inode;
2737
2738 ret = btrfs_insert_inode_ref(trans, new_root, "..", 2, new_dirid,
2739 new_dirid);
2740 inode->i_nlink = 1;
2741 inode->i_size = 0;
2742
2743 return btrfs_update_inode(trans, new_root, inode);
2744 }
2745
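/*
 * kick off readahead for a range of pages, hiding the readahead API
 * change between older and newer kernels.
 */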
2746 unsigned long btrfs_force_ra(struct address_space *mapping,
2747 struct file_ra_state *ra, struct file *file,
2748 pgoff_t offset, pgoff_t last_index)
2749 {
2750 pgoff_t req_size = last_index - offset + 1;
2751
2752 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2753 offset = page_cache_readahead(mapping, ra, file, offset, req_size);
2754 return offset;
2755 #else
2756 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
2757 return offset + req_size;
2758 #endif
2759 }
2760
2761 struct inode *btrfs_alloc_inode(struct super_block *sb)
2762 {
2763 struct btrfs_inode *ei;
2764
2765 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
2766 if (!ei)
2767 return NULL;
2768 ei->last_trans = 0;
2769 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
2770 return &ei->vfs_inode;
2771 }
2772
2773 void btrfs_destroy_inode(struct inode *inode)
2774 {
2775 struct btrfs_ordered_extent *ordered;
2776 WARN_ON(!list_empty(&inode->i_dentry));
2777 WARN_ON(inode->i_data.nrpages);
2778
2779 while(1) {
2780 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
2781 if (!ordered)
2782 break;
2783 else {
2784 printk("found ordered extent %Lu %Lu\n",
2785 ordered->file_offset, ordered->len);
2786 btrfs_remove_ordered_extent(inode, ordered);
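/* drop the lookup reference and the reference held by the ordered tree */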
2787 btrfs_put_ordered_extent(ordered);
2788 btrfs_put_ordered_extent(ordered);
2789 }
2790 }
2791 btrfs_drop_extent_cache(inode, 0, (u64)-1);
2792 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
2793 }
2794
2795 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
2796 static void init_once(struct kmem_cache * cachep, void *foo)
2797 #else
2798 static void init_once(void * foo, struct kmem_cache * cachep,
2799 unsigned long flags)
2800 #endif
2801 {
2802 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
2803
2804 inode_init_once(&ei->vfs_inode);
2805 }
2806
2807 void btrfs_destroy_cachep(void)
2808 {
2809 if (btrfs_inode_cachep)
2810 kmem_cache_destroy(btrfs_inode_cachep);
2811 if (btrfs_trans_handle_cachep)
2812 kmem_cache_destroy(btrfs_trans_handle_cachep);
2813 if (btrfs_transaction_cachep)
2814 kmem_cache_destroy(btrfs_transaction_cachep);
2815 if (btrfs_bit_radix_cachep)
2816 kmem_cache_destroy(btrfs_bit_radix_cachep);
2817 if (btrfs_path_cachep)
2818 kmem_cache_destroy(btrfs_path_cachep);
2819 }
2820
2821 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
2822 unsigned long extra_flags,
2823 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
2824 void (*ctor)(struct kmem_cache *, void *)
2825 #else
2826 void (*ctor)(void *, struct kmem_cache *,
2827 unsigned long)
2828 #endif
2829 )
2830 {
2831 return kmem_cache_create(name, size, 0, (SLAB_RECLAIM_ACCOUNT |
2832 SLAB_MEM_SPREAD | extra_flags), ctor
2833 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
2834 ,NULL
2835 #endif
2836 );
2837 }
2838
2839 int btrfs_init_cachep(void)
2840 {
2841 btrfs_inode_cachep = btrfs_cache_create("btrfs_inode_cache",
2842 sizeof(struct btrfs_inode),
2843 0, init_once);
2844 if (!btrfs_inode_cachep)
2845 goto fail;
2846 btrfs_trans_handle_cachep =
2847 btrfs_cache_create("btrfs_trans_handle_cache",
2848 sizeof(struct btrfs_trans_handle),
2849 0, NULL);
2850 if (!btrfs_trans_handle_cachep)
2851 goto fail;
2852 btrfs_transaction_cachep = btrfs_cache_create("btrfs_transaction_cache",
2853 sizeof(struct btrfs_transaction),
2854 0, NULL);
2855 if (!btrfs_transaction_cachep)
2856 goto fail;
2857 btrfs_path_cachep = btrfs_cache_create("btrfs_path_cache",
2858 sizeof(struct btrfs_path),
2859 0, NULL);
2860 if (!btrfs_path_cachep)
2861 goto fail;
2862 btrfs_bit_radix_cachep = btrfs_cache_create("btrfs_radix", 256,
2863 SLAB_DESTROY_BY_RCU, NULL);
2864 if (!btrfs_bit_radix_cachep)
2865 goto fail;
2866 return 0;
2867 fail:
2868 btrfs_destroy_cachep();
2869 return -ENOMEM;
2870 }
2871
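/*
 * stat also counts delalloc bytes that have not been written out yet, so
 * the reported block count reflects data still sitting in the page cache.
 */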
2872 static int btrfs_getattr(struct vfsmount *mnt,
2873 struct dentry *dentry, struct kstat *stat)
2874 {
2875 struct inode *inode = dentry->d_inode;
2876 generic_fillattr(inode, stat);
2877 stat->blksize = PAGE_CACHE_SIZE;
2878 stat->blocks = inode->i_blocks + (BTRFS_I(inode)->delalloc_bytes >> 9);
2879 return 0;
2880 }
2881
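/*
 * rename removes the old directory entry (and the entry it replaces, if
 * any) and adds a link under the new name, all inside one transaction.
 */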
2882 static int btrfs_rename(struct inode * old_dir, struct dentry *old_dentry,
2883 struct inode * new_dir,struct dentry *new_dentry)
2884 {
2885 struct btrfs_trans_handle *trans;
2886 struct btrfs_root *root = BTRFS_I(old_dir)->root;
2887 struct inode *new_inode = new_dentry->d_inode;
2888 struct inode *old_inode = old_dentry->d_inode;
2889 struct timespec ctime = CURRENT_TIME;
2890 int ret;
2891
2892 if (S_ISDIR(old_inode->i_mode) && new_inode &&
2893 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
2894 return -ENOTEMPTY;
2895 }
2896
2897 ret = btrfs_check_free_space(root, 1, 0);
2898 if (ret)
2899 goto out_unlock;
2900
2901 trans = btrfs_start_transaction(root, 1);
2902
2903 btrfs_set_trans_block_group(trans, new_dir);
2904
2905 old_dentry->d_inode->i_nlink++;
2906 old_dir->i_ctime = old_dir->i_mtime = ctime;
2907 new_dir->i_ctime = new_dir->i_mtime = ctime;
2908 old_inode->i_ctime = ctime;
2909
2910 ret = btrfs_unlink_trans(trans, root, old_dir, old_dentry);
2911 if (ret)
2912 goto out_fail;
2913
2914 if (new_inode) {
2915 new_inode->i_ctime = CURRENT_TIME;
2916 ret = btrfs_unlink_trans(trans, root, new_dir, new_dentry);
2917 if (ret)
2918 goto out_fail;
2919 }
2920 ret = btrfs_add_link(trans, new_dentry, old_inode, 1);
2921 if (ret)
2922 goto out_fail;
2923
2924 out_fail:
2925 btrfs_end_transaction(trans, root);
2926 out_unlock:
2927 return ret;
2928 }
2929
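/*
 * symlinks store their target string in an inline file extent item, so
 * the target must fit within the inline data size limit for the tree.
 */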
2930 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
2931 const char *symname)
2932 {
2933 struct btrfs_trans_handle *trans;
2934 struct btrfs_root *root = BTRFS_I(dir)->root;
2935 struct btrfs_path *path;
2936 struct btrfs_key key;
2937 struct inode *inode = NULL;
2938 int err;
2939 int drop_inode = 0;
2940 u64 objectid;
2941 int name_len;
2942 int datasize;
2943 unsigned long ptr;
2944 struct btrfs_file_extent_item *ei;
2945 struct extent_buffer *leaf;
2946 unsigned long nr = 0;
2947
2948 name_len = strlen(symname) + 1;
2949 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
2950 return -ENAMETOOLONG;
2951
2952 err = btrfs_check_free_space(root, 1, 0);
2953 if (err)
2954 goto out_fail;
2955
2956 trans = btrfs_start_transaction(root, 1);
2957 btrfs_set_trans_block_group(trans, dir);
2958
2959 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
2960 if (err) {
2961 err = -ENOSPC;
2962 goto out_unlock;
2963 }
2964
2965 inode = btrfs_new_inode(trans, root, dentry->d_name.name,
2966 dentry->d_name.len,
2967 dentry->d_parent->d_inode->i_ino, objectid,
2968 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO);
2969 err = PTR_ERR(inode);
2970 if (IS_ERR(inode))
2971 goto out_unlock;
2972
2973 btrfs_set_trans_block_group(trans, inode);
2974 err = btrfs_add_nondir(trans, dentry, inode, 0);
2975 if (err)
2976 drop_inode = 1;
2977 else {
2978 inode->i_mapping->a_ops = &btrfs_aops;
2979 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2980 inode->i_fop = &btrfs_file_operations;
2981 inode->i_op = &btrfs_file_inode_operations;
2982 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
2983 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
2984 inode->i_mapping, GFP_NOFS);
2985 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
2986 inode->i_mapping, GFP_NOFS);
2987 mutex_init(&BTRFS_I(inode)->csum_mutex);
2988 BTRFS_I(inode)->delalloc_bytes = 0;
2989 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2990 }
2991 dir->i_sb->s_dirt = 1;
2992 btrfs_update_inode_block_group(trans, inode);
2993 btrfs_update_inode_block_group(trans, dir);
2994 if (drop_inode)
2995 goto out_unlock;
2996
2997 path = btrfs_alloc_path();
2998 BUG_ON(!path);
2999 key.objectid = inode->i_ino;
3000 key.offset = 0;
3001 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
3002 datasize = btrfs_file_extent_calc_inline_size(name_len);
3003 err = btrfs_insert_empty_item(trans, root, path, &key,
3004 datasize);
3005 if (err) {
3006 drop_inode = 1;
3007 goto out_unlock;
3008 }
3009 leaf = path->nodes[0];
3010 ei = btrfs_item_ptr(leaf, path->slots[0],
3011 struct btrfs_file_extent_item);
3012 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
3013 btrfs_set_file_extent_type(leaf, ei,
3014 BTRFS_FILE_EXTENT_INLINE);
3015 ptr = btrfs_file_extent_inline_start(ei);
3016 write_extent_buffer(leaf, symname, ptr, name_len);
3017 btrfs_mark_buffer_dirty(leaf);
3018 btrfs_free_path(path);
3019
3020 inode->i_op = &btrfs_symlink_inode_operations;
3021 inode->i_mapping->a_ops = &btrfs_symlink_aops;
3022 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3023 inode->i_size = name_len - 1;
3024 err = btrfs_update_inode(trans, root, inode);
3025 if (err)
3026 drop_inode = 1;
3027
3028 out_unlock:
3029 nr = trans->blocks_used;
3030 btrfs_end_transaction_throttle(trans, root);
3031 out_fail:
3032 if (drop_inode) {
3033 inode_dec_link_count(inode);
3034 iput(inode);
3035 }
3036 btrfs_btree_balance_dirty(root, nr);
3037 return err;
3038 }
3039
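/*
 * data pages should only be dirtied through paths that set up delalloc
 * accounting first; warn if a page is dirtied without the
 * EXTENT_DELALLOC bit set for its range.
 */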
3040 static int btrfs_set_page_dirty(struct page *page)
3041 {
3042 struct inode *inode = page->mapping->host;
3043 u64 page_start = page_offset(page);
3044 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
3045
3046 if (!test_range_bit(&BTRFS_I(inode)->io_tree, page_start, page_end,
3047 EXTENT_DELALLOC, 0)) {
3048 printk("inode %lu page %Lu not delalloc\n", inode->i_ino, page_offset(page));
3049 WARN_ON(1);
3050 }
3051 return __set_page_dirty_nobuffers(page);
3052 }
3053
3054 static int btrfs_permission(struct inode *inode, int mask,
3055 struct nameidata *nd)
3056 {
3057 if (btrfs_test_flag(inode, READONLY) && (mask & MAY_WRITE))
3058 return -EACCES;
3059 return generic_permission(inode, mask, NULL);
3060 }
3061
3062 static struct inode_operations btrfs_dir_inode_operations = {
3063 .lookup = btrfs_lookup,
3064 .create = btrfs_create,
3065 .unlink = btrfs_unlink,
3066 .link = btrfs_link,
3067 .mkdir = btrfs_mkdir,
3068 .rmdir = btrfs_rmdir,
3069 .rename = btrfs_rename,
3070 .symlink = btrfs_symlink,
3071 .setattr = btrfs_setattr,
3072 .mknod = btrfs_mknod,
3073 .setxattr = generic_setxattr,
3074 .getxattr = generic_getxattr,
3075 .listxattr = btrfs_listxattr,
3076 .removexattr = generic_removexattr,
3077 .permission = btrfs_permission,
3078 };
3079 static struct inode_operations btrfs_dir_ro_inode_operations = {
3080 .lookup = btrfs_lookup,
3081 .permission = btrfs_permission,
3082 };
3083 static struct file_operations btrfs_dir_file_operations = {
3084 .llseek = generic_file_llseek,
3085 .read = generic_read_dir,
3086 .readdir = btrfs_readdir,
3087 .unlocked_ioctl = btrfs_ioctl,
3088 #ifdef CONFIG_COMPAT
3089 .compat_ioctl = btrfs_ioctl,
3090 #endif
3091 .release = btrfs_release_file,
3092 };
3093
3094 static struct extent_io_ops btrfs_extent_io_ops = {
3095 .fill_delalloc = run_delalloc_range,
3096 .submit_bio_hook = btrfs_submit_bio_hook,
3097 .merge_bio_hook = btrfs_merge_bio_hook,
3098 .readpage_io_hook = btrfs_readpage_io_hook,
3099 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
3100 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
3101 .readpage_io_failed_hook = btrfs_io_failed_hook,
3102 .set_bit_hook = btrfs_set_bit_hook,
3103 .clear_bit_hook = btrfs_clear_bit_hook,
3104 };
3105
3106 static struct address_space_operations btrfs_aops = {
3107 .readpage = btrfs_readpage,
3108 .writepage = btrfs_writepage,
3109 .writepages = btrfs_writepages,
3110 .readpages = btrfs_readpages,
3111 .sync_page = block_sync_page,
3112 .bmap = btrfs_bmap,
3113 .direct_IO = btrfs_direct_IO,
3114 .invalidatepage = btrfs_invalidatepage,
3115 .releasepage = btrfs_releasepage,
3116 .set_page_dirty = btrfs_set_page_dirty,
3117 };
3118
3119 static struct address_space_operations btrfs_symlink_aops = {
3120 .readpage = btrfs_readpage,
3121 .writepage = btrfs_writepage,
3122 .invalidatepage = btrfs_invalidatepage,
3123 .releasepage = btrfs_releasepage,
3124 };
3125
3126 static struct inode_operations btrfs_file_inode_operations = {
3127 .truncate = btrfs_truncate,
3128 .getattr = btrfs_getattr,
3129 .setattr = btrfs_setattr,
3130 .setxattr = generic_setxattr,
3131 .getxattr = generic_getxattr,
3132 .listxattr = btrfs_listxattr,
3133 .removexattr = generic_removexattr,
3134 .permission = btrfs_permission,
3135 };
3136 static struct inode_operations btrfs_special_inode_operations = {
3137 .getattr = btrfs_getattr,
3138 .setattr = btrfs_setattr,
3139 .permission = btrfs_permission,
3140 };
3141 static struct inode_operations btrfs_symlink_inode_operations = {
3142 .readlink = generic_readlink,
3143 .follow_link = page_follow_link_light,
3144 .put_link = page_put_link,
3145 .permission = btrfs_permission,
3146 };