Btrfs: do not reuse objectid of deleted snapshot/subvol
fs/btrfs/inode.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
39 #include "compat.h"
40 #include "ctree.h"
41 #include "disk-io.h"
42 #include "transaction.h"
43 #include "btrfs_inode.h"
44 #include "ioctl.h"
45 #include "print-tree.h"
46 #include "volumes.h"
47 #include "ordered-data.h"
48 #include "xattr.h"
49 #include "tree-log.h"
50 #include "compression.h"
51 #include "locking.h"
52
53 struct btrfs_iget_args {
54 u64 ino;
55 struct btrfs_root *root;
56 };
57
58 static struct inode_operations btrfs_dir_inode_operations;
59 static struct inode_operations btrfs_symlink_inode_operations;
60 static struct inode_operations btrfs_dir_ro_inode_operations;
61 static struct inode_operations btrfs_special_inode_operations;
62 static struct inode_operations btrfs_file_inode_operations;
63 static struct address_space_operations btrfs_aops;
64 static struct address_space_operations btrfs_symlink_aops;
65 static struct file_operations btrfs_dir_file_operations;
66 static struct extent_io_ops btrfs_extent_io_ops;
67
68 static struct kmem_cache *btrfs_inode_cachep;
69 struct kmem_cache *btrfs_trans_handle_cachep;
70 struct kmem_cache *btrfs_transaction_cachep;
71 struct kmem_cache *btrfs_path_cachep;
72
73 #define S_SHIFT 12
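/*
 * maps the S_IFMT bits of a VFS inode mode to the BTRFS_FT_* value stored
 * in directory items; index with (mode & S_IFMT) >> S_SHIFT
 */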
74 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
75 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
76 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
77 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
78 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
79 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
80 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
81 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
82 };
83
84 static void btrfs_truncate(struct inode *inode);
85 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
86 static noinline int cow_file_range(struct inode *inode,
87 struct page *locked_page,
88 u64 start, u64 end, int *page_started,
89 unsigned long *nr_written, int unlock);
90
91 static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
92 {
93 int err;
94
95 err = btrfs_init_acl(inode, dir);
96 if (!err)
97 err = btrfs_xattr_security_init(inode, dir);
98 return err;
99 }
100
101 /*
102 * this does all the hard work for inserting an inline extent into
103 * the btree. The caller should have done a btrfs_drop_extents so that
104 * no overlapping inline items exist in the btree
105 */
106 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root, struct inode *inode,
108 u64 start, size_t size, size_t compressed_size,
109 struct page **compressed_pages)
110 {
111 struct btrfs_key key;
112 struct btrfs_path *path;
113 struct extent_buffer *leaf;
114 struct page *page = NULL;
115 char *kaddr;
116 unsigned long ptr;
117 struct btrfs_file_extent_item *ei;
118 int err = 0;
119 int ret;
120 size_t cur_size = size;
121 size_t datasize;
122 unsigned long offset;
123 int use_compress = 0;
124
125 if (compressed_size && compressed_pages) {
126 use_compress = 1;
127 cur_size = compressed_size;
128 }
129
130 path = btrfs_alloc_path();
131 if (!path)
132 return -ENOMEM;
133
134 path->leave_spinning = 1;
135 btrfs_set_trans_block_group(trans, inode);
136
137 key.objectid = inode->i_ino;
138 key.offset = start;
139 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
140 datasize = btrfs_file_extent_calc_inline_size(cur_size);
141
142 inode_add_bytes(inode, size);
143 ret = btrfs_insert_empty_item(trans, root, path, &key,
144 datasize);
145 BUG_ON(ret);
146 if (ret) {
147 err = ret;
148 goto fail;
149 }
150 leaf = path->nodes[0];
151 ei = btrfs_item_ptr(leaf, path->slots[0],
152 struct btrfs_file_extent_item);
153 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
154 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
155 btrfs_set_file_extent_encryption(leaf, ei, 0);
156 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
157 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
158 ptr = btrfs_file_extent_inline_start(ei);
159
160 if (use_compress) {
161 struct page *cpage;
162 int i = 0;
163 while (compressed_size > 0) {
164 cpage = compressed_pages[i];
165 cur_size = min_t(unsigned long, compressed_size,
166 PAGE_CACHE_SIZE);
167
168 kaddr = kmap_atomic(cpage, KM_USER0);
169 write_extent_buffer(leaf, kaddr, ptr, cur_size);
170 kunmap_atomic(kaddr, KM_USER0);
171
172 i++;
173 ptr += cur_size;
174 compressed_size -= cur_size;
175 }
176 btrfs_set_file_extent_compression(leaf, ei,
177 BTRFS_COMPRESS_ZLIB);
178 } else {
179 page = find_get_page(inode->i_mapping,
180 start >> PAGE_CACHE_SHIFT);
181 btrfs_set_file_extent_compression(leaf, ei, 0);
182 kaddr = kmap_atomic(page, KM_USER0);
183 offset = start & (PAGE_CACHE_SIZE - 1);
184 write_extent_buffer(leaf, kaddr + offset, ptr, size);
185 kunmap_atomic(kaddr, KM_USER0);
186 page_cache_release(page);
187 }
188 btrfs_mark_buffer_dirty(leaf);
189 btrfs_free_path(path);
190
191 BTRFS_I(inode)->disk_i_size = inode->i_size;
192 btrfs_update_inode(trans, root, inode);
193 return 0;
194 fail:
195 btrfs_free_path(path);
196 return err;
197 }
198
199
200 /*
201 * conditionally insert an inline extent into the file. This
202 * does the checks required to make sure the data is small enough
203 * to fit as an inline extent.
204 */
205 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
206 struct btrfs_root *root,
207 struct inode *inode, u64 start, u64 end,
208 size_t compressed_size,
209 struct page **compressed_pages)
210 {
211 u64 isize = i_size_read(inode);
212 u64 actual_end = min(end + 1, isize);
213 u64 inline_len = actual_end - start;
214 u64 aligned_end = (end + root->sectorsize - 1) &
215 ~((u64)root->sectorsize - 1);
216 u64 hint_byte;
217 u64 data_len = inline_len;
218 int ret;
219
220 if (compressed_size)
221 data_len = compressed_size;
222
223 if (start > 0 ||
224 actual_end >= PAGE_CACHE_SIZE ||
225 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
226 (!compressed_size &&
227 (actual_end & (root->sectorsize - 1)) == 0) ||
228 end + 1 < isize ||
229 data_len > root->fs_info->max_inline) {
230 return 1;
231 }
232
233 ret = btrfs_drop_extents(trans, root, inode, start,
234 aligned_end, aligned_end, start,
235 &hint_byte, 1);
236 BUG_ON(ret);
237
238 if (isize > actual_end)
239 inline_len = min_t(u64, isize, actual_end);
240 ret = insert_inline_extent(trans, root, inode, start,
241 inline_len, compressed_size,
242 compressed_pages);
243 BUG_ON(ret);
244 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
245 return 0;
246 }
247
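/*
 * work items for the async compression path: compress_file_range() queues
 * one async_extent per compressed (or fall-back uncompressed) range onto
 * the async_cow covering the delalloc range it was handed, and
 * submit_compressed_extents() later walks the list to allocate disk space
 * and submit the IO
 */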
248 struct async_extent {
249 u64 start;
250 u64 ram_size;
251 u64 compressed_size;
252 struct page **pages;
253 unsigned long nr_pages;
254 struct list_head list;
255 };
256
257 struct async_cow {
258 struct inode *inode;
259 struct btrfs_root *root;
260 struct page *locked_page;
261 u64 start;
262 u64 end;
263 struct list_head extents;
264 struct btrfs_work work;
265 };
266
267 static noinline int add_async_extent(struct async_cow *cow,
268 u64 start, u64 ram_size,
269 u64 compressed_size,
270 struct page **pages,
271 unsigned long nr_pages)
272 {
273 struct async_extent *async_extent;
274
275 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
276 async_extent->start = start;
277 async_extent->ram_size = ram_size;
278 async_extent->compressed_size = compressed_size;
279 async_extent->pages = pages;
280 async_extent->nr_pages = nr_pages;
281 list_add_tail(&async_extent->list, &cow->extents);
282 return 0;
283 }
284
285 /*
286 * we create compressed extents in two phases. The first
287 * phase compresses a range of pages that have already been
288 * locked (both pages and state bits are locked).
289 *
290 * This is done inside an ordered work queue, and the compression
291 * is spread across many cpus. The actual IO submission is step
292 * two, and the ordered work queue takes care of making sure that
293 * happens in the same order things were put onto the queue by
294 * writepages and friends.
295 *
296 * If this code finds it can't get good compression, it puts an
297 * entry onto the work queue to write the uncompressed bytes. This
298 * makes sure that both compressed inodes and uncompressed inodes
299 * are written in the same order that pdflush sent them down.
300 */
301 static noinline int compress_file_range(struct inode *inode,
302 struct page *locked_page,
303 u64 start, u64 end,
304 struct async_cow *async_cow,
305 int *num_added)
306 {
307 struct btrfs_root *root = BTRFS_I(inode)->root;
308 struct btrfs_trans_handle *trans;
309 u64 num_bytes;
310 u64 orig_start;
311 u64 disk_num_bytes;
312 u64 blocksize = root->sectorsize;
313 u64 actual_end;
314 u64 isize = i_size_read(inode);
315 int ret = 0;
316 struct page **pages = NULL;
317 unsigned long nr_pages;
318 unsigned long nr_pages_ret = 0;
319 unsigned long total_compressed = 0;
320 unsigned long total_in = 0;
321 unsigned long max_compressed = 128 * 1024;
322 unsigned long max_uncompressed = 128 * 1024;
323 int i;
324 int will_compress;
325
326 orig_start = start;
327
328 actual_end = min_t(u64, isize, end + 1);
329 again:
330 will_compress = 0;
331 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
332 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
333
334 /*
335 * we don't want to send crud past the end of i_size through
336 * compression, that's just a waste of CPU time. So, if the
337 * end of the file is before the start of our current
338 * requested range of bytes, we bail out to the uncompressed
339 * cleanup code that can deal with all of this.
340 *
341 * It isn't really the fastest way to fix things, but this is a
342 * very uncommon corner.
343 */
344 if (actual_end <= start)
345 goto cleanup_and_bail_uncompressed;
346
347 total_compressed = actual_end - start;
348
349 /* we want to make sure that amount of ram required to uncompress
350 * an extent is reasonable, so we limit the total size in ram
351 * of a compressed extent to 128k. This is a crucial number
352 * because it also controls how easily we can spread reads across
353 * cpus for decompression.
354 *
355 * We also want to make sure the amount of IO required to do
356 * a random read is reasonably small, so we limit the size of
357 * a compressed extent to 128k.
358 */
359 total_compressed = min(total_compressed, max_uncompressed);
360 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
361 num_bytes = max(blocksize, num_bytes);
362 disk_num_bytes = num_bytes;
363 total_in = 0;
364 ret = 0;
365
366 /*
367 * we do compression for mount -o compress and when the
368 * inode has not been flagged as nocompress. This flag can
369 * change at any time if we discover bad compression ratios.
370 */
371 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
372 btrfs_test_opt(root, COMPRESS)) {
373 WARN_ON(pages);
374 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
375
376 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
377 total_compressed, pages,
378 nr_pages, &nr_pages_ret,
379 &total_in,
380 &total_compressed,
381 max_compressed);
382
383 if (!ret) {
384 unsigned long offset = total_compressed &
385 (PAGE_CACHE_SIZE - 1);
386 struct page *page = pages[nr_pages_ret - 1];
387 char *kaddr;
388
389 /* zero the tail end of the last page, we might be
390 * sending it down to disk
391 */
392 if (offset) {
393 kaddr = kmap_atomic(page, KM_USER0);
394 memset(kaddr + offset, 0,
395 PAGE_CACHE_SIZE - offset);
396 kunmap_atomic(kaddr, KM_USER0);
397 }
398 will_compress = 1;
399 }
400 }
401 if (start == 0) {
402 trans = btrfs_join_transaction(root, 1);
403 BUG_ON(!trans);
404 btrfs_set_trans_block_group(trans, inode);
405
406 /* lets try to make an inline extent */
407 if (ret || total_in < (actual_end - start)) {
408 /* we didn't compress the entire range, try
409 * to make an uncompressed inline extent.
410 */
411 ret = cow_file_range_inline(trans, root, inode,
412 start, end, 0, NULL);
413 } else {
414 /* try making a compressed inline extent */
415 ret = cow_file_range_inline(trans, root, inode,
416 start, end,
417 total_compressed, pages);
418 }
419 btrfs_end_transaction(trans, root);
420 if (ret == 0) {
421 /*
422 * inline extent creation worked, we don't need
423 * to create any more async work items. Unlock
424 * and free up our temp pages.
425 */
426 extent_clear_unlock_delalloc(inode,
427 &BTRFS_I(inode)->io_tree,
428 start, end, NULL, 1, 0,
429 0, 1, 1, 1, 0);
430 ret = 0;
431 goto free_pages_out;
432 }
433 }
434
435 if (will_compress) {
436 /*
437 * we aren't doing an inline extent, so round the compressed size
438 * up to a block size boundary so the allocator does sane
439 * things
440 */
441 total_compressed = (total_compressed + blocksize - 1) &
442 ~(blocksize - 1);
443
444 /*
445 * one last check to make sure the compression is really a
446 * win, compare the page count read with the blocks on disk
447 */
448 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
449 ~(PAGE_CACHE_SIZE - 1);
450 if (total_compressed >= total_in) {
451 will_compress = 0;
452 } else {
453 disk_num_bytes = total_compressed;
454 num_bytes = total_in;
455 }
456 }
457 if (!will_compress && pages) {
458 /*
459 * the compression code ran but failed to make things smaller,
460 * free any pages it allocated and our page pointer array
461 */
462 for (i = 0; i < nr_pages_ret; i++) {
463 WARN_ON(pages[i]->mapping);
464 page_cache_release(pages[i]);
465 }
466 kfree(pages);
467 pages = NULL;
468 total_compressed = 0;
469 nr_pages_ret = 0;
470
471 /* flag the file so we don't compress in the future */
472 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
473 }
474 if (will_compress) {
475 *num_added += 1;
476
477 /* the async work queues will take care of doing actual
478 * allocation on disk for these compressed pages,
479 * and will submit them to the elevator.
480 */
481 add_async_extent(async_cow, start, num_bytes,
482 total_compressed, pages, nr_pages_ret);
483
484 if (start + num_bytes < end && start + num_bytes < actual_end) {
485 start += num_bytes;
486 pages = NULL;
487 cond_resched();
488 goto again;
489 }
490 } else {
491 cleanup_and_bail_uncompressed:
492 /*
493 * No compression, but we still need to write the pages in
494 * the file we've been given so far. redirty the locked
495 * page if it corresponds to our extent and set things up
496 * for the async work queue to run cow_file_range to do
497 * the normal delalloc dance
498 */
499 if (page_offset(locked_page) >= start &&
500 page_offset(locked_page) <= end) {
501 __set_page_dirty_nobuffers(locked_page);
502 /* unlocked later on in the async handlers */
503 }
504 add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
505 *num_added += 1;
506 }
507
508 out:
509 return 0;
510
511 free_pages_out:
512 for (i = 0; i < nr_pages_ret; i++) {
513 WARN_ON(pages[i]->mapping);
514 page_cache_release(pages[i]);
515 }
516 kfree(pages);
517
518 goto out;
519 }
520
521 /*
522 * phase two of compressed writeback. This is the ordered portion
523 * of the code, which only gets called in the order the work was
524 * queued. We walk all the async extents created by compress_file_range
525 * and send them down to the disk.
526 */
527 static noinline int submit_compressed_extents(struct inode *inode,
528 struct async_cow *async_cow)
529 {
530 struct async_extent *async_extent;
531 u64 alloc_hint = 0;
532 struct btrfs_trans_handle *trans;
533 struct btrfs_key ins;
534 struct extent_map *em;
535 struct btrfs_root *root = BTRFS_I(inode)->root;
536 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
537 struct extent_io_tree *io_tree;
538 int ret;
539
540 if (list_empty(&async_cow->extents))
541 return 0;
542
543 trans = btrfs_join_transaction(root, 1);
544
545 while (!list_empty(&async_cow->extents)) {
546 async_extent = list_entry(async_cow->extents.next,
547 struct async_extent, list);
548 list_del(&async_extent->list);
549
550 io_tree = &BTRFS_I(inode)->io_tree;
551
552 /* did the compression code fall back to uncompressed IO? */
553 if (!async_extent->pages) {
554 int page_started = 0;
555 unsigned long nr_written = 0;
556
557 lock_extent(io_tree, async_extent->start,
558 async_extent->start +
559 async_extent->ram_size - 1, GFP_NOFS);
560
561 /* allocate blocks */
562 cow_file_range(inode, async_cow->locked_page,
563 async_extent->start,
564 async_extent->start +
565 async_extent->ram_size - 1,
566 &page_started, &nr_written, 0);
567
568 /*
569 * if page_started, cow_file_range inserted an
570 * inline extent and took care of all the unlocking
571 * and IO for us. Otherwise, we need to submit
572 * all those pages down to the drive.
573 */
574 if (!page_started)
575 extent_write_locked_range(io_tree,
576 inode, async_extent->start,
577 async_extent->start +
578 async_extent->ram_size - 1,
579 btrfs_get_extent,
580 WB_SYNC_ALL);
581 kfree(async_extent);
582 cond_resched();
583 continue;
584 }
585
586 lock_extent(io_tree, async_extent->start,
587 async_extent->start + async_extent->ram_size - 1,
588 GFP_NOFS);
589 /*
590 * here we're doing allocation and writeback of the
591 * compressed pages
592 */
593 btrfs_drop_extent_cache(inode, async_extent->start,
594 async_extent->start +
595 async_extent->ram_size - 1, 0);
596
597 ret = btrfs_reserve_extent(trans, root,
598 async_extent->compressed_size,
599 async_extent->compressed_size,
600 0, alloc_hint,
601 (u64)-1, &ins, 1);
602 BUG_ON(ret);
603 em = alloc_extent_map(GFP_NOFS);
604 em->start = async_extent->start;
605 em->len = async_extent->ram_size;
606 em->orig_start = em->start;
607
608 em->block_start = ins.objectid;
609 em->block_len = ins.offset;
610 em->bdev = root->fs_info->fs_devices->latest_bdev;
611 set_bit(EXTENT_FLAG_PINNED, &em->flags);
612 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
613
614 while (1) {
615 write_lock(&em_tree->lock);
616 ret = add_extent_mapping(em_tree, em);
617 write_unlock(&em_tree->lock);
618 if (ret != -EEXIST) {
619 free_extent_map(em);
620 break;
621 }
622 btrfs_drop_extent_cache(inode, async_extent->start,
623 async_extent->start +
624 async_extent->ram_size - 1, 0);
625 }
626
627 ret = btrfs_add_ordered_extent(inode, async_extent->start,
628 ins.objectid,
629 async_extent->ram_size,
630 ins.offset,
631 BTRFS_ORDERED_COMPRESSED);
632 BUG_ON(ret);
633
634 btrfs_end_transaction(trans, root);
635
636 /*
637 * clear dirty, set writeback and unlock the pages.
638 */
639 extent_clear_unlock_delalloc(inode,
640 &BTRFS_I(inode)->io_tree,
641 async_extent->start,
642 async_extent->start +
643 async_extent->ram_size - 1,
644 NULL, 1, 1, 0, 1, 1, 0, 0);
645
646 ret = btrfs_submit_compressed_write(inode,
647 async_extent->start,
648 async_extent->ram_size,
649 ins.objectid,
650 ins.offset, async_extent->pages,
651 async_extent->nr_pages);
652
653 BUG_ON(ret);
654 trans = btrfs_join_transaction(root, 1);
655 alloc_hint = ins.objectid + ins.offset;
656 kfree(async_extent);
657 cond_resched();
658 }
659
660 btrfs_end_transaction(trans, root);
661 return 0;
662 }
663
664 /*
665 * when extent_io.c finds a delayed allocation range in the file,
666 * the call backs end up in this code. The basic idea is to
667 * allocate extents on disk for the range, and create ordered data structs
668 * in ram to track those extents.
669 *
670 * locked_page is the page that writepage had locked already. We use
671 * it to make sure we don't do extra locks or unlocks.
672 *
673 * *page_started is set to one if we unlock locked_page and do everything
674 * required to start IO on it. It may be clean and already done with
675 * IO when we return.
676 */
677 static noinline int cow_file_range(struct inode *inode,
678 struct page *locked_page,
679 u64 start, u64 end, int *page_started,
680 unsigned long *nr_written,
681 int unlock)
682 {
683 struct btrfs_root *root = BTRFS_I(inode)->root;
684 struct btrfs_trans_handle *trans;
685 u64 alloc_hint = 0;
686 u64 num_bytes;
687 unsigned long ram_size;
688 u64 disk_num_bytes;
689 u64 cur_alloc_size;
690 u64 blocksize = root->sectorsize;
691 u64 actual_end;
692 u64 isize = i_size_read(inode);
693 struct btrfs_key ins;
694 struct extent_map *em;
695 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
696 int ret = 0;
697
698 trans = btrfs_join_transaction(root, 1);
699 BUG_ON(!trans);
700 btrfs_set_trans_block_group(trans, inode);
701
702 actual_end = min_t(u64, isize, end + 1);
703
704 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
705 num_bytes = max(blocksize, num_bytes);
706 disk_num_bytes = num_bytes;
707 ret = 0;
708
709 if (start == 0) {
710 /* lets try to make an inline extent */
711 ret = cow_file_range_inline(trans, root, inode,
712 start, end, 0, NULL);
713 if (ret == 0) {
714 extent_clear_unlock_delalloc(inode,
715 &BTRFS_I(inode)->io_tree,
716 start, end, NULL, 1, 1,
717 1, 1, 1, 1, 0);
718 *nr_written = *nr_written +
719 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
720 *page_started = 1;
721 ret = 0;
722 goto out;
723 }
724 }
725
726 BUG_ON(disk_num_bytes >
727 btrfs_super_total_bytes(&root->fs_info->super_copy));
728
729
730 read_lock(&BTRFS_I(inode)->extent_tree.lock);
731 em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
732 start, num_bytes);
733 if (em) {
734 alloc_hint = em->block_start;
735 free_extent_map(em);
736 }
737 read_unlock(&BTRFS_I(inode)->extent_tree.lock);
738 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
739
740 while (disk_num_bytes > 0) {
741 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
742 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
743 root->sectorsize, 0, alloc_hint,
744 (u64)-1, &ins, 1);
745 BUG_ON(ret);
746
747 em = alloc_extent_map(GFP_NOFS);
748 em->start = start;
749 em->orig_start = em->start;
750 ram_size = ins.offset;
751 em->len = ins.offset;
752
753 em->block_start = ins.objectid;
754 em->block_len = ins.offset;
755 em->bdev = root->fs_info->fs_devices->latest_bdev;
756 set_bit(EXTENT_FLAG_PINNED, &em->flags);
757
758 while (1) {
759 write_lock(&em_tree->lock);
760 ret = add_extent_mapping(em_tree, em);
761 write_unlock(&em_tree->lock);
762 if (ret != -EEXIST) {
763 free_extent_map(em);
764 break;
765 }
766 btrfs_drop_extent_cache(inode, start,
767 start + ram_size - 1, 0);
768 }
769
770 cur_alloc_size = ins.offset;
771 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
772 ram_size, cur_alloc_size, 0);
773 BUG_ON(ret);
774
775 if (root->root_key.objectid ==
776 BTRFS_DATA_RELOC_TREE_OBJECTID) {
777 ret = btrfs_reloc_clone_csums(inode, start,
778 cur_alloc_size);
779 BUG_ON(ret);
780 }
781
782 if (disk_num_bytes < cur_alloc_size)
783 break;
784
785 /* we're not doing compressed IO, don't unlock the first
786 * page (which the caller expects to stay locked), don't
787 * clear any dirty bits and don't set any writeback bits
788 *
789 * Do set the Private2 bit so we know this page was properly
790 * setup for writepage
791 */
792 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
793 start, start + ram_size - 1,
794 locked_page, unlock, 1,
795 1, 0, 0, 0, 1);
796 disk_num_bytes -= cur_alloc_size;
797 num_bytes -= cur_alloc_size;
798 alloc_hint = ins.objectid + ins.offset;
799 start += cur_alloc_size;
800 }
801 out:
802 ret = 0;
803 btrfs_end_transaction(trans, root);
804
805 return ret;
806 }
807
808 /*
809 * work queue callback to start compression on a file and its pages
810 */
811 static noinline void async_cow_start(struct btrfs_work *work)
812 {
813 struct async_cow *async_cow;
814 int num_added = 0;
815 async_cow = container_of(work, struct async_cow, work);
816
817 compress_file_range(async_cow->inode, async_cow->locked_page,
818 async_cow->start, async_cow->end, async_cow,
819 &num_added);
820 if (num_added == 0)
821 async_cow->inode = NULL;
822 }
823
824 /*
825 * work queue callback to submit previously compressed pages
826 */
827 static noinline void async_cow_submit(struct btrfs_work *work)
828 {
829 struct async_cow *async_cow;
830 struct btrfs_root *root;
831 unsigned long nr_pages;
832
833 async_cow = container_of(work, struct async_cow, work);
834
835 root = async_cow->root;
836 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
837 PAGE_CACHE_SHIFT;
838
839 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
840
841 if (atomic_read(&root->fs_info->async_delalloc_pages) <
842 5 * 1024 * 1024 &&
843 waitqueue_active(&root->fs_info->async_submit_wait))
844 wake_up(&root->fs_info->async_submit_wait);
845
846 if (async_cow->inode)
847 submit_compressed_extents(async_cow->inode, async_cow);
848 }
849
850 static noinline void async_cow_free(struct btrfs_work *work)
851 {
852 struct async_cow *async_cow;
853 async_cow = container_of(work, struct async_cow, work);
854 kfree(async_cow);
855 }
856
857 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
858 u64 start, u64 end, int *page_started,
859 unsigned long *nr_written)
860 {
861 struct async_cow *async_cow;
862 struct btrfs_root *root = BTRFS_I(inode)->root;
863 unsigned long nr_pages;
864 u64 cur_end;
865 int limit = 10 * 1024 * 1024;
866
867 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED |
868 EXTENT_DELALLOC, 1, 0, NULL, GFP_NOFS);
869 while (start < end) {
870 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
871 async_cow->inode = inode;
872 async_cow->root = root;
873 async_cow->locked_page = locked_page;
874 async_cow->start = start;
875
876 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
877 cur_end = end;
878 else
879 cur_end = min(end, start + 512 * 1024 - 1);
880
881 async_cow->end = cur_end;
882 INIT_LIST_HEAD(&async_cow->extents);
883
884 async_cow->work.func = async_cow_start;
885 async_cow->work.ordered_func = async_cow_submit;
886 async_cow->work.ordered_free = async_cow_free;
887 async_cow->work.flags = 0;
888
889 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
890 PAGE_CACHE_SHIFT;
891 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
892
893 btrfs_queue_worker(&root->fs_info->delalloc_workers,
894 &async_cow->work);
895
896 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
897 wait_event(root->fs_info->async_submit_wait,
898 (atomic_read(&root->fs_info->async_delalloc_pages) <
899 limit));
900 }
901
902 while (atomic_read(&root->fs_info->async_submit_draining) &&
903 atomic_read(&root->fs_info->async_delalloc_pages)) {
904 wait_event(root->fs_info->async_submit_wait,
905 (atomic_read(&root->fs_info->async_delalloc_pages) ==
906 0));
907 }
908
909 *nr_written += nr_pages;
910 start = cur_end + 1;
911 }
912 *page_started = 1;
913 return 0;
914 }
915
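/*
 * returns 0 only when the csum lookup succeeds and finds no checksum items
 * in the byte range; otherwise returns 1. run_delalloc_nocow() uses this to
 * force COW so that the csums for an extent are either all present or all
 * absent.
 */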
916 static noinline int csum_exist_in_range(struct btrfs_root *root,
917 u64 bytenr, u64 num_bytes)
918 {
919 int ret;
920 struct btrfs_ordered_sum *sums;
921 LIST_HEAD(list);
922
923 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
924 bytenr + num_bytes - 1, &list);
925 if (ret == 0 && list_empty(&list))
926 return 0;
927
928 while (!list_empty(&list)) {
929 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
930 list_del(&sums->list);
931 kfree(sums);
932 }
933 return 1;
934 }
935
936 /*
937 * when nocow writeback is in use, this callback checks for snapshots or COW
938 * copies of the extents that exist in the file, and COWs the file as required.
939 *
940 * If no cow copies or snapshots exist, we write directly to the existing
941 * blocks on disk
942 */
943 static noinline int run_delalloc_nocow(struct inode *inode,
944 struct page *locked_page,
945 u64 start, u64 end, int *page_started, int force,
946 unsigned long *nr_written)
947 {
948 struct btrfs_root *root = BTRFS_I(inode)->root;
949 struct btrfs_trans_handle *trans;
950 struct extent_buffer *leaf;
951 struct btrfs_path *path;
952 struct btrfs_file_extent_item *fi;
953 struct btrfs_key found_key;
954 u64 cow_start;
955 u64 cur_offset;
956 u64 extent_end;
957 u64 extent_offset;
958 u64 disk_bytenr;
959 u64 num_bytes;
960 int extent_type;
961 int ret;
962 int type;
963 int nocow;
964 int check_prev = 1;
965
966 path = btrfs_alloc_path();
967 BUG_ON(!path);
968 trans = btrfs_join_transaction(root, 1);
969 BUG_ON(!trans);
970
971 cow_start = (u64)-1;
972 cur_offset = start;
973 while (1) {
974 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
975 cur_offset, 0);
976 BUG_ON(ret < 0);
977 if (ret > 0 && path->slots[0] > 0 && check_prev) {
978 leaf = path->nodes[0];
979 btrfs_item_key_to_cpu(leaf, &found_key,
980 path->slots[0] - 1);
981 if (found_key.objectid == inode->i_ino &&
982 found_key.type == BTRFS_EXTENT_DATA_KEY)
983 path->slots[0]--;
984 }
985 check_prev = 0;
986 next_slot:
987 leaf = path->nodes[0];
988 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
989 ret = btrfs_next_leaf(root, path);
990 if (ret < 0)
991 BUG_ON(1);
992 if (ret > 0)
993 break;
994 leaf = path->nodes[0];
995 }
996
997 nocow = 0;
998 disk_bytenr = 0;
999 num_bytes = 0;
1000 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1001
1002 if (found_key.objectid > inode->i_ino ||
1003 found_key.type > BTRFS_EXTENT_DATA_KEY ||
1004 found_key.offset > end)
1005 break;
1006
1007 if (found_key.offset > cur_offset) {
1008 extent_end = found_key.offset;
1009 goto out_check;
1010 }
1011
1012 fi = btrfs_item_ptr(leaf, path->slots[0],
1013 struct btrfs_file_extent_item);
1014 extent_type = btrfs_file_extent_type(leaf, fi);
1015
1016 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1017 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1018 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1019 extent_offset = btrfs_file_extent_offset(leaf, fi);
1020 extent_end = found_key.offset +
1021 btrfs_file_extent_num_bytes(leaf, fi);
1022 if (extent_end <= start) {
1023 path->slots[0]++;
1024 goto next_slot;
1025 }
1026 if (disk_bytenr == 0)
1027 goto out_check;
1028 if (btrfs_file_extent_compression(leaf, fi) ||
1029 btrfs_file_extent_encryption(leaf, fi) ||
1030 btrfs_file_extent_other_encoding(leaf, fi))
1031 goto out_check;
1032 if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1033 goto out_check;
1034 if (btrfs_extent_readonly(root, disk_bytenr))
1035 goto out_check;
1036 if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1037 found_key.offset -
1038 extent_offset, disk_bytenr))
1039 goto out_check;
1040 disk_bytenr += extent_offset;
1041 disk_bytenr += cur_offset - found_key.offset;
1042 num_bytes = min(end + 1, extent_end) - cur_offset;
1043 /*
1044 * force cow if csums exist in the range.
1045 * this ensures that the csums for a given extent are
1046 * either all valid or do not exist.
1047 */
1048 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1049 goto out_check;
1050 nocow = 1;
1051 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1052 extent_end = found_key.offset +
1053 btrfs_file_extent_inline_len(leaf, fi);
1054 extent_end = ALIGN(extent_end, root->sectorsize);
1055 } else {
1056 BUG_ON(1);
1057 }
1058 out_check:
1059 if (extent_end <= start) {
1060 path->slots[0]++;
1061 goto next_slot;
1062 }
1063 if (!nocow) {
1064 if (cow_start == (u64)-1)
1065 cow_start = cur_offset;
1066 cur_offset = extent_end;
1067 if (cur_offset > end)
1068 break;
1069 path->slots[0]++;
1070 goto next_slot;
1071 }
1072
1073 btrfs_release_path(root, path);
1074 if (cow_start != (u64)-1) {
1075 ret = cow_file_range(inode, locked_page, cow_start,
1076 found_key.offset - 1, page_started,
1077 nr_written, 1);
1078 BUG_ON(ret);
1079 cow_start = (u64)-1;
1080 }
1081
1082 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1083 struct extent_map *em;
1084 struct extent_map_tree *em_tree;
1085 em_tree = &BTRFS_I(inode)->extent_tree;
1086 em = alloc_extent_map(GFP_NOFS);
1087 em->start = cur_offset;
1088 em->orig_start = em->start;
1089 em->len = num_bytes;
1090 em->block_len = num_bytes;
1091 em->block_start = disk_bytenr;
1092 em->bdev = root->fs_info->fs_devices->latest_bdev;
1093 set_bit(EXTENT_FLAG_PINNED, &em->flags);
1094 while (1) {
1095 write_lock(&em_tree->lock);
1096 ret = add_extent_mapping(em_tree, em);
1097 write_unlock(&em_tree->lock);
1098 if (ret != -EEXIST) {
1099 free_extent_map(em);
1100 break;
1101 }
1102 btrfs_drop_extent_cache(inode, em->start,
1103 em->start + em->len - 1, 0);
1104 }
1105 type = BTRFS_ORDERED_PREALLOC;
1106 } else {
1107 type = BTRFS_ORDERED_NOCOW;
1108 }
1109
1110 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1111 num_bytes, num_bytes, type);
1112 BUG_ON(ret);
1113
1114 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1115 cur_offset, cur_offset + num_bytes - 1,
1116 locked_page, 1, 1, 1, 0, 0, 0, 1);
1117 cur_offset = extent_end;
1118 if (cur_offset > end)
1119 break;
1120 }
1121 btrfs_release_path(root, path);
1122
1123 if (cur_offset <= end && cow_start == (u64)-1)
1124 cow_start = cur_offset;
1125 if (cow_start != (u64)-1) {
1126 ret = cow_file_range(inode, locked_page, cow_start, end,
1127 page_started, nr_written, 1);
1128 BUG_ON(ret);
1129 }
1130
1131 ret = btrfs_end_transaction(trans, root);
1132 BUG_ON(ret);
1133 btrfs_free_path(path);
1134 return 0;
1135 }
1136
1137 /*
1138 * extent_io.c callback to do delayed allocation processing
1139 */
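/*
 * dispatch order: NODATACOW inodes go through run_delalloc_nocow() with
 * force=1, inodes with preallocated extents take the same path with
 * force=0, plain COW uses cow_file_range() directly, and mount -o compress
 * goes through the async path so compression can be spread across cpus
 */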
1140 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1141 u64 start, u64 end, int *page_started,
1142 unsigned long *nr_written)
1143 {
1144 int ret;
1145 struct btrfs_root *root = BTRFS_I(inode)->root;
1146
1147 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1148 ret = run_delalloc_nocow(inode, locked_page, start, end,
1149 page_started, 1, nr_written);
1150 else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1151 ret = run_delalloc_nocow(inode, locked_page, start, end,
1152 page_started, 0, nr_written);
1153 else if (!btrfs_test_opt(root, COMPRESS))
1154 ret = cow_file_range(inode, locked_page, start, end,
1155 page_started, nr_written, 1);
1156 else
1157 ret = cow_file_range_async(inode, locked_page, start, end,
1158 page_started, nr_written);
1159 return ret;
1160 }
1161
1162 /*
1163 * extent_io.c set_bit_hook, used to track delayed allocation
1164 * bytes in this file, and to maintain the list of inodes that
1165 * have pending delalloc work to be done.
1166 */
1167 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1168 unsigned long old, unsigned long bits)
1169 {
1170 /*
1171 * set_bit and clear bit hooks normally require _irqsave/restore
1172 * but in this case, we are only testing for the DELALLOC
1173 * bit, which is only set or cleared with irqs on
1174 */
1175 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1176 struct btrfs_root *root = BTRFS_I(inode)->root;
1177 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1178 spin_lock(&root->fs_info->delalloc_lock);
1179 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1180 root->fs_info->delalloc_bytes += end - start + 1;
1181 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1182 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1183 &root->fs_info->delalloc_inodes);
1184 }
1185 spin_unlock(&root->fs_info->delalloc_lock);
1186 }
1187 return 0;
1188 }
1189
1190 /*
1191 * extent_io.c clear_bit_hook, see set_bit_hook for why
1192 */
1193 static int btrfs_clear_bit_hook(struct inode *inode, u64 start, u64 end,
1194 unsigned long old, unsigned long bits)
1195 {
1196 /*
1197 * set_bit and clear bit hooks normally require _irqsave/restore
1198 * but in this case, we are only testing for the DELALLOC
1199 * bit, which is only set or cleared with irqs on
1200 */
1201 if ((old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1202 struct btrfs_root *root = BTRFS_I(inode)->root;
1203
1204 spin_lock(&root->fs_info->delalloc_lock);
1205 if (end - start + 1 > root->fs_info->delalloc_bytes) {
1206 printk(KERN_INFO "btrfs warning: delalloc account "
1207 "%llu %llu\n",
1208 (unsigned long long)end - start + 1,
1209 (unsigned long long)
1210 root->fs_info->delalloc_bytes);
1211 btrfs_delalloc_free_space(root, inode, (u64)-1);
1212 root->fs_info->delalloc_bytes = 0;
1213 BTRFS_I(inode)->delalloc_bytes = 0;
1214 } else {
1215 btrfs_delalloc_free_space(root, inode,
1216 end - start + 1);
1217 root->fs_info->delalloc_bytes -= end - start + 1;
1218 BTRFS_I(inode)->delalloc_bytes -= end - start + 1;
1219 }
1220 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1221 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1222 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1223 }
1224 spin_unlock(&root->fs_info->delalloc_lock);
1225 }
1226 return 0;
1227 }
1228
1229 /*
1230 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1231 * we don't create bios that span stripes or chunks
1232 */
1233 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1234 size_t size, struct bio *bio,
1235 unsigned long bio_flags)
1236 {
1237 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1238 struct btrfs_mapping_tree *map_tree;
1239 u64 logical = (u64)bio->bi_sector << 9;
1240 u64 length = 0;
1241 u64 map_length;
1242 int ret;
1243
1244 if (bio_flags & EXTENT_BIO_COMPRESSED)
1245 return 0;
1246
1247 length = bio->bi_size;
1248 map_tree = &root->fs_info->mapping_tree;
1249 map_length = length;
1250 ret = btrfs_map_block(map_tree, READ, logical,
1251 &map_length, NULL, 0);
1252
1253 if (map_length < length + size)
1254 return 1;
1255 return 0;
1256 }
1257
1258 /*
1259 * in order to insert checksums into the metadata in large chunks,
1260 * we wait until bio submission time. All the pages in the bio are
1261 * checksummed and sums are attached onto the ordered extent record.
1262 *
1263 * At IO completion time the csums attached on the ordered extent record
1264 * are inserted into the btree
1265 */
1266 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1267 struct bio *bio, int mirror_num,
1268 unsigned long bio_flags)
1269 {
1270 struct btrfs_root *root = BTRFS_I(inode)->root;
1271 int ret = 0;
1272
1273 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1274 BUG_ON(ret);
1275 return 0;
1276 }
1277
1278 /*
1279 * in order to insert checksums into the metadata in large chunks,
1280 * we wait until bio submission time. All the pages in the bio are
1281 * checksummed and sums are attached onto the ordered extent record.
1282 *
1283 * At IO completion time the csums attached on the ordered extent record
1284 * are inserted into the btree
1285 */
1286 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1287 int mirror_num, unsigned long bio_flags)
1288 {
1289 struct btrfs_root *root = BTRFS_I(inode)->root;
1290 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1291 }
1292
1293 /*
1294 * extent_io.c submission hook. This does the right thing for csum calculation
1295 * on write, or reading the csums from the tree before a read
1296 */
1297 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1298 int mirror_num, unsigned long bio_flags)
1299 {
1300 struct btrfs_root *root = BTRFS_I(inode)->root;
1301 int ret = 0;
1302 int skip_sum;
1303
1304 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1305
1306 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1307 BUG_ON(ret);
1308
1309 if (!(rw & (1 << BIO_RW))) {
1310 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1311 return btrfs_submit_compressed_read(inode, bio,
1312 mirror_num, bio_flags);
1313 } else if (!skip_sum)
1314 btrfs_lookup_bio_sums(root, inode, bio, NULL);
1315 goto mapit;
1316 } else if (!skip_sum) {
1317 /* csum items have already been cloned */
1318 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1319 goto mapit;
1320 /* we're doing a write, do the async checksumming */
1321 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1322 inode, rw, bio, mirror_num,
1323 bio_flags, __btrfs_submit_bio_start,
1324 __btrfs_submit_bio_done);
1325 }
1326
1327 mapit:
1328 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1329 }
1330
1331 /*
1332 * given a list of ordered sums record them in the inode. This happens
1333 * at IO completion time based on sums calculated at bio submission time.
1334 */
1335 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1336 struct inode *inode, u64 file_offset,
1337 struct list_head *list)
1338 {
1339 struct btrfs_ordered_sum *sum;
1340
1341 btrfs_set_trans_block_group(trans, inode);
1342
1343 list_for_each_entry(sum, list, list) {
1344 btrfs_csum_file_blocks(trans,
1345 BTRFS_I(inode)->root->fs_info->csum_root, sum);
1346 }
1347 return 0;
1348 }
1349
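/*
 * note: end is inclusive (the last byte of the delalloc range), so a
 * page-aligned value here most likely means a caller passed an exclusive
 * end; hence the WARN_ON below
 */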
1350 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1351 {
1352 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1353 WARN_ON(1);
1354 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1355 GFP_NOFS);
1356 }
1357
1358 /* see btrfs_writepage_start_hook for details on why this is required */
1359 struct btrfs_writepage_fixup {
1360 struct page *page;
1361 struct btrfs_work work;
1362 };
1363
1364 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1365 {
1366 struct btrfs_writepage_fixup *fixup;
1367 struct btrfs_ordered_extent *ordered;
1368 struct page *page;
1369 struct inode *inode;
1370 u64 page_start;
1371 u64 page_end;
1372
1373 fixup = container_of(work, struct btrfs_writepage_fixup, work);
1374 page = fixup->page;
1375 again:
1376 lock_page(page);
1377 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1378 ClearPageChecked(page);
1379 goto out_page;
1380 }
1381
1382 inode = page->mapping->host;
1383 page_start = page_offset(page);
1384 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1385
1386 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1387
1388 /* already ordered? We're done */
1389 if (PagePrivate2(page))
1390 goto out;
1391
1392 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1393 if (ordered) {
1394 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1395 page_end, GFP_NOFS);
1396 unlock_page(page);
1397 btrfs_start_ordered_extent(inode, ordered, 1);
1398 goto again;
1399 }
1400
1401 btrfs_set_extent_delalloc(inode, page_start, page_end);
1402 ClearPageChecked(page);
1403 out:
1404 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1405 out_page:
1406 unlock_page(page);
1407 page_cache_release(page);
1408 }
1409
1410 /*
1411 * There are a few paths in the higher layers of the kernel that directly
1412 * set the page dirty bit without asking the filesystem if it is a
1413 * good idea. This causes problems because we want to make sure COW
1414 * properly happens and the data=ordered rules are followed.
1415 *
1416 * In our case any range that doesn't have the ORDERED bit set
1417 * hasn't been properly setup for IO. We kick off an async process
1418 * to fix it up. The async helper will wait for ordered extents, set
1419 * the delalloc bit and make it safe to write the page.
1420 */
1421 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1422 {
1423 struct inode *inode = page->mapping->host;
1424 struct btrfs_writepage_fixup *fixup;
1425 struct btrfs_root *root = BTRFS_I(inode)->root;
1426
1427 /* this page is properly in the ordered list */
1428 if (TestClearPagePrivate2(page))
1429 return 0;
1430
1431 if (PageChecked(page))
1432 return -EAGAIN;
1433
1434 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1435 if (!fixup)
1436 return -EAGAIN;
1437
1438 SetPageChecked(page);
1439 page_cache_get(page);
1440 fixup->work.func = btrfs_writepage_fixup_worker;
1441 fixup->page = page;
1442 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1443 return -EAGAIN;
1444 }
1445
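/*
 * inserts a file extent item for space that was reserved earlier and has
 * now been fully written (called as ordered extents complete); drops any
 * old extents in the range and records the new extent in the extent
 * allocation tree
 */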
1446 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1447 struct inode *inode, u64 file_pos,
1448 u64 disk_bytenr, u64 disk_num_bytes,
1449 u64 num_bytes, u64 ram_bytes,
1450 u64 locked_end,
1451 u8 compression, u8 encryption,
1452 u16 other_encoding, int extent_type)
1453 {
1454 struct btrfs_root *root = BTRFS_I(inode)->root;
1455 struct btrfs_file_extent_item *fi;
1456 struct btrfs_path *path;
1457 struct extent_buffer *leaf;
1458 struct btrfs_key ins;
1459 u64 hint;
1460 int ret;
1461
1462 path = btrfs_alloc_path();
1463 BUG_ON(!path);
1464
1465 path->leave_spinning = 1;
1466
1467 /*
1468 * we may be replacing one extent in the tree with another.
1469 * The new extent is pinned in the extent map, and we don't want
1470 * to drop it from the cache until it is completely in the btree.
1471 *
1472 * So, tell btrfs_drop_extents to leave this extent in the cache.
1473 * the caller is expected to unpin it and allow it to be merged
1474 * with the others.
1475 */
1476 ret = btrfs_drop_extents(trans, root, inode, file_pos,
1477 file_pos + num_bytes, locked_end,
1478 file_pos, &hint, 0);
1479 BUG_ON(ret);
1480
1481 ins.objectid = inode->i_ino;
1482 ins.offset = file_pos;
1483 ins.type = BTRFS_EXTENT_DATA_KEY;
1484 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1485 BUG_ON(ret);
1486 leaf = path->nodes[0];
1487 fi = btrfs_item_ptr(leaf, path->slots[0],
1488 struct btrfs_file_extent_item);
1489 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1490 btrfs_set_file_extent_type(leaf, fi, extent_type);
1491 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1492 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1493 btrfs_set_file_extent_offset(leaf, fi, 0);
1494 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1495 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1496 btrfs_set_file_extent_compression(leaf, fi, compression);
1497 btrfs_set_file_extent_encryption(leaf, fi, encryption);
1498 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1499
1500 btrfs_unlock_up_safe(path, 1);
1501 btrfs_set_lock_blocking(leaf);
1502
1503 btrfs_mark_buffer_dirty(leaf);
1504
1505 inode_add_bytes(inode, num_bytes);
1506
1507 ins.objectid = disk_bytenr;
1508 ins.offset = disk_num_bytes;
1509 ins.type = BTRFS_EXTENT_ITEM_KEY;
1510 ret = btrfs_alloc_reserved_file_extent(trans, root,
1511 root->root_key.objectid,
1512 inode->i_ino, file_pos, &ins);
1513 BUG_ON(ret);
1514 btrfs_free_path(path);
1515
1516 return 0;
1517 }
1518
1519 /*
1520 * helper function for btrfs_finish_ordered_io, this
1521 * just reads in some of the csum leaves to prime them into ram
1522 * before we start the transaction. It limits the amount of btree
1523 * reads required while inside the transaction.
1524 */
1525 static noinline void reada_csum(struct btrfs_root *root,
1526 struct btrfs_path *path,
1527 struct btrfs_ordered_extent *ordered_extent)
1528 {
1529 struct btrfs_ordered_sum *sum;
1530 u64 bytenr;
1531
1532 sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1533 list);
1534 bytenr = sum->sums[0].bytenr;
1535
1536 /*
1537 * we don't care about the results, the point of this search is
1538 * just to get the btree leaves into ram
1539 */
1540 btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1541 }
1542
1543 /* as ordered data IO finishes, this gets called so we can finish
1544 * an ordered extent if the range of bytes in the file it covers are
1545 * fully written.
1546 */
1547 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1548 {
1549 struct btrfs_root *root = BTRFS_I(inode)->root;
1550 struct btrfs_trans_handle *trans;
1551 struct btrfs_ordered_extent *ordered_extent = NULL;
1552 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1553 struct btrfs_path *path;
1554 int compressed = 0;
1555 int ret;
1556
1557 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1558 if (!ret)
1559 return 0;
1560
1561 /*
1562 * before we join the transaction, try to do some of our IO.
1563 * This will limit the amount of IO that we have to do with
1564 * the transaction running. We're unlikely to need to do any
1565 * IO if the file extents are new, the disk_i_size checks
1566 * covers the most common case.
1567 */
1568 if (start < BTRFS_I(inode)->disk_i_size) {
1569 path = btrfs_alloc_path();
1570 if (path) {
1571 ret = btrfs_lookup_file_extent(NULL, root, path,
1572 inode->i_ino,
1573 start, 0);
1574 ordered_extent = btrfs_lookup_ordered_extent(inode,
1575 start);
1576 if (!list_empty(&ordered_extent->list)) {
1577 btrfs_release_path(root, path);
1578 reada_csum(root, path, ordered_extent);
1579 }
1580 btrfs_free_path(path);
1581 }
1582 }
1583
1584 trans = btrfs_join_transaction(root, 1);
1585
1586 if (!ordered_extent)
1587 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1588 BUG_ON(!ordered_extent);
1589 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1590 goto nocow;
1591
1592 lock_extent(io_tree, ordered_extent->file_offset,
1593 ordered_extent->file_offset + ordered_extent->len - 1,
1594 GFP_NOFS);
1595
1596 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1597 compressed = 1;
1598 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1599 BUG_ON(compressed);
1600 ret = btrfs_mark_extent_written(trans, root, inode,
1601 ordered_extent->file_offset,
1602 ordered_extent->file_offset +
1603 ordered_extent->len);
1604 BUG_ON(ret);
1605 } else {
1606 ret = insert_reserved_file_extent(trans, inode,
1607 ordered_extent->file_offset,
1608 ordered_extent->start,
1609 ordered_extent->disk_len,
1610 ordered_extent->len,
1611 ordered_extent->len,
1612 ordered_extent->file_offset +
1613 ordered_extent->len,
1614 compressed, 0, 0,
1615 BTRFS_FILE_EXTENT_REG);
1616 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1617 ordered_extent->file_offset,
1618 ordered_extent->len);
1619 BUG_ON(ret);
1620 }
1621 unlock_extent(io_tree, ordered_extent->file_offset,
1622 ordered_extent->file_offset + ordered_extent->len - 1,
1623 GFP_NOFS);
1624 nocow:
1625 add_pending_csums(trans, inode, ordered_extent->file_offset,
1626 &ordered_extent->list);
1627
1628 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1629 btrfs_ordered_update_i_size(inode, ordered_extent);
1630 btrfs_update_inode(trans, root, inode);
1631 btrfs_remove_ordered_extent(inode, ordered_extent);
1632 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1633
1634 /* once for us */
1635 btrfs_put_ordered_extent(ordered_extent);
1636 /* once for the tree */
1637 btrfs_put_ordered_extent(ordered_extent);
1638
1639 btrfs_end_transaction(trans, root);
1640 return 0;
1641 }
1642
1643 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1644 struct extent_state *state, int uptodate)
1645 {
1646 ClearPagePrivate2(page);
1647 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1648 }
1649
1650 /*
1651 * When IO fails, either with EIO or csum verification fails, we
1652 * try other mirrors that might have a good copy of the data. This
1653 * io_failure_record is used to record state as we go through all the
1654 * mirrors. If another mirror has good data, the page is set up to date
1655 * and things continue. If a good mirror can't be found, the original
1656 * bio end_io callback is called to indicate things have failed.
1657 */
1658 struct io_failure_record {
1659 struct page *page;
1660 u64 start;
1661 u64 len;
1662 u64 logical;
1663 unsigned long bio_flags;
1664 int last_mirror;
1665 };
1666
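/*
 * called when an IO fails (EIO or csum mismatch): record the failure in the
 * io_failure_tree, bump last_mirror, and resubmit the page to the next
 * mirror; gives up with -EIO once every copy has been tried
 */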
1667 static int btrfs_io_failed_hook(struct bio *failed_bio,
1668 struct page *page, u64 start, u64 end,
1669 struct extent_state *state)
1670 {
1671 struct io_failure_record *failrec = NULL;
1672 u64 private;
1673 struct extent_map *em;
1674 struct inode *inode = page->mapping->host;
1675 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1676 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1677 struct bio *bio;
1678 int num_copies;
1679 int ret;
1680 int rw;
1681 u64 logical;
1682
1683 ret = get_state_private(failure_tree, start, &private);
1684 if (ret) {
1685 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1686 if (!failrec)
1687 return -ENOMEM;
1688 failrec->start = start;
1689 failrec->len = end - start + 1;
1690 failrec->last_mirror = 0;
1691 failrec->bio_flags = 0;
1692
1693 read_lock(&em_tree->lock);
1694 em = lookup_extent_mapping(em_tree, start, failrec->len);
1695 if (em->start > start || em->start + em->len < start) {
1696 free_extent_map(em);
1697 em = NULL;
1698 }
1699 read_unlock(&em_tree->lock);
1700
1701 if (!em || IS_ERR(em)) {
1702 kfree(failrec);
1703 return -EIO;
1704 }
1705 logical = start - em->start;
1706 logical = em->block_start + logical;
1707 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1708 logical = em->block_start;
1709 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1710 }
1711 failrec->logical = logical;
1712 free_extent_map(em);
1713 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1714 EXTENT_DIRTY, GFP_NOFS);
1715 set_state_private(failure_tree, start,
1716 (u64)(unsigned long)failrec);
1717 } else {
1718 failrec = (struct io_failure_record *)(unsigned long)private;
1719 }
1720 num_copies = btrfs_num_copies(
1721 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1722 failrec->logical, failrec->len);
1723 failrec->last_mirror++;
1724 if (!state) {
1725 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1726 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1727 failrec->start,
1728 EXTENT_LOCKED);
1729 if (state && state->start != failrec->start)
1730 state = NULL;
1731 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1732 }
1733 if (!state || failrec->last_mirror > num_copies) {
1734 set_state_private(failure_tree, failrec->start, 0);
1735 clear_extent_bits(failure_tree, failrec->start,
1736 failrec->start + failrec->len - 1,
1737 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1738 kfree(failrec);
1739 return -EIO;
1740 }
1741 bio = bio_alloc(GFP_NOFS, 1);
1742 bio->bi_private = state;
1743 bio->bi_end_io = failed_bio->bi_end_io;
1744 bio->bi_sector = failrec->logical >> 9;
1745 bio->bi_bdev = failed_bio->bi_bdev;
1746 bio->bi_size = 0;
1747
1748 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1749 if (failed_bio->bi_rw & (1 << BIO_RW))
1750 rw = WRITE;
1751 else
1752 rw = READ;
1753
1754 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1755 failrec->last_mirror,
1756 failrec->bio_flags);
1757 return 0;
1758 }
1759
1760 /*
1761 * each time an IO finishes, we do a fast check in the IO failure tree
1762 * to see if we need to process or clean up an io_failure_record
1763 */
1764 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1765 {
1766 u64 private;
1767 u64 private_failure;
1768 struct io_failure_record *failure;
1769 int ret;
1770
1771 private = 0;
1772 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1773 (u64)-1, 1, EXTENT_DIRTY)) {
1774 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1775 start, &private_failure);
1776 if (ret == 0) {
1777 failure = (struct io_failure_record *)(unsigned long)
1778 private_failure;
1779 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1780 failure->start, 0);
1781 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1782 failure->start,
1783 failure->start + failure->len - 1,
1784 EXTENT_DIRTY | EXTENT_LOCKED,
1785 GFP_NOFS);
1786 kfree(failure);
1787 }
1788 }
1789 return 0;
1790 }
1791
1792 /*
1793 * when reads are done, we need to check csums to verify the data is correct
1794 * if there's a match, we allow the bio to finish. If not, we go through
1795 * the io_failure_record routines to find good copies
1796 */
1797 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1798 struct extent_state *state)
1799 {
1800 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1801 struct inode *inode = page->mapping->host;
1802 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1803 char *kaddr;
1804 u64 private = ~(u32)0;
1805 int ret;
1806 struct btrfs_root *root = BTRFS_I(inode)->root;
1807 u32 csum = ~(u32)0;
1808
1809 if (PageChecked(page)) {
1810 ClearPageChecked(page);
1811 goto good;
1812 }
1813
1814 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1815 return 0;
1816
1817 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1818 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1819 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1820 GFP_NOFS);
1821 return 0;
1822 }
1823
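/*
 * pull the csum that was stored for this range out of the io tree
 * and compare it against a csum of the data in the page
 */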
1824 if (state && state->start == start) {
1825 private = state->private;
1826 ret = 0;
1827 } else {
1828 ret = get_state_private(io_tree, start, &private);
1829 }
1830 kaddr = kmap_atomic(page, KM_USER0);
1831 if (ret)
1832 goto zeroit;
1833
1834 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1835 btrfs_csum_final(csum, (char *)&csum);
1836 if (csum != private)
1837 goto zeroit;
1838
1839 kunmap_atomic(kaddr, KM_USER0);
1840 good:
1841 /* if the io failure tree for this inode is non-empty,
1842 * check to see if we've recovered from a failed IO
1843 */
1844 btrfs_clean_io_failures(inode, start);
1845 return 0;
1846
1847 zeroit:
1848 if (printk_ratelimit()) {
1849 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1850 "private %llu\n", page->mapping->host->i_ino,
1851 (unsigned long long)start, csum,
1852 (unsigned long long)private);
1853 }
1854 memset(kaddr + offset, 1, end - start + 1);
1855 flush_dcache_page(page);
1856 kunmap_atomic(kaddr, KM_USER0);
1857 if (private == 0)
1858 return 0;
1859 return -EIO;
1860 }
1861
1862 /*
1863 * This creates an orphan entry for the given inode in case something goes
1864 * wrong in the middle of an unlink/truncate.
1865 */
1866 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1867 {
1868 struct btrfs_root *root = BTRFS_I(inode)->root;
1869 int ret = 0;
1870
1871 spin_lock(&root->list_lock);
1872
1873 /* already on the orphan list, we're good */
1874 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1875 spin_unlock(&root->list_lock);
1876 return 0;
1877 }
1878
1879 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1880
1881 spin_unlock(&root->list_lock);
1882
1883 /*
1884 * insert an orphan item to track this unlinked/truncated file
1885 */
1886 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1887
1888 return ret;
1889 }
1890
1891 /*
1892 * We have done the truncate/delete so we can go ahead and remove the orphan
1893 * item for this particular inode.
1894 */
1895 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
1896 {
1897 struct btrfs_root *root = BTRFS_I(inode)->root;
1898 int ret = 0;
1899
1900 spin_lock(&root->list_lock);
1901
1902 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
1903 spin_unlock(&root->list_lock);
1904 return 0;
1905 }
1906
1907 list_del_init(&BTRFS_I(inode)->i_orphan);
1908 if (!trans) {
1909 spin_unlock(&root->list_lock);
1910 return 0;
1911 }
1912
1913 spin_unlock(&root->list_lock);
1914
1915 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
1916
1917 return ret;
1918 }
1919
1920 /*
1921 * this cleans up any orphans that may be left on the list from the last use
1922 * of this root.
1923 */
1924 void btrfs_orphan_cleanup(struct btrfs_root *root)
1925 {
1926 struct btrfs_path *path;
1927 struct extent_buffer *leaf;
1928 struct btrfs_item *item;
1929 struct btrfs_key key, found_key;
1930 struct btrfs_trans_handle *trans;
1931 struct inode *inode;
1932 int ret = 0, nr_unlink = 0, nr_truncate = 0;
1933
1934 path = btrfs_alloc_path();
1935 if (!path)
1936 return;
1937 path->reada = -1;
1938
1939 key.objectid = BTRFS_ORPHAN_OBJECTID;
1940 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
1941 key.offset = (u64)-1;
1942
1943
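/*
 * the key is the largest possible orphan item, so each search lands
 * just past the orphan items and we step back one slot to find them
 */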
1944 while (1) {
1945 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1946 if (ret < 0) {
1947 printk(KERN_ERR "Error searching slot for orphan: %d"
1948 "\n", ret);
1949 break;
1950 }
1951
1952 /*
1953 * if ret == 0, it means we found what we were searching for, which
1954 * is weird, but possible, so only screw with the path if we didn't
1955 * find the key and see if we have stuff that matches
1956 */
1957 if (ret > 0) {
1958 if (path->slots[0] == 0)
1959 break;
1960 path->slots[0]--;
1961 }
1962
1963 /* pull out the item */
1964 leaf = path->nodes[0];
1965 item = btrfs_item_nr(leaf, path->slots[0]);
1966 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1967
1968 /* make sure the item matches what we want */
1969 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
1970 break;
1971 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
1972 break;
1973
1974 /* release the path since we're done with it */
1975 btrfs_release_path(root, path);
1976
1977 /*
1978 * this is where we basically do btrfs_lookup, without crossing
1979 * into another root. We store the inode number in the
1980 * offset of the orphan item.
1981 */
1982 found_key.objectid = found_key.offset;
1983 found_key.type = BTRFS_INODE_ITEM_KEY;
1984 found_key.offset = 0;
1985 inode = btrfs_iget(root->fs_info->sb, &found_key, root);
1986 if (IS_ERR(inode))
1987 break;
1988
1989 /*
1990 * add this inode to the orphan list so btrfs_orphan_del does
1991 * the proper thing when we hit it
1992 */
1993 spin_lock(&root->list_lock);
1994 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1995 spin_unlock(&root->list_lock);
1996
1997 /*
1998 * if this is a bad inode, it means we actually succeeded in
1999 * removing the inode, but not the orphan record, which means
2000 * we need to manually delete the orphan since iput will just
2001 * do a destroy_inode
2002 */
2003 if (is_bad_inode(inode)) {
2004 trans = btrfs_start_transaction(root, 1);
2005 btrfs_orphan_del(trans, inode);
2006 btrfs_end_transaction(trans, root);
2007 iput(inode);
2008 continue;
2009 }
2010
2011 /* if we have links, this was a truncate, so let's do that */
2012 if (inode->i_nlink) {
2013 nr_truncate++;
2014 btrfs_truncate(inode);
2015 } else {
2016 nr_unlink++;
2017 }
2018
2019 /* this will do delete_inode and everything for us */
2020 iput(inode);
2021 }
2022
2023 if (nr_unlink)
2024 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2025 if (nr_truncate)
2026 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2027
2028 btrfs_free_path(path);
2029 }
2030
2031 /*
2032 * very simple check to peek ahead in the leaf looking for xattrs. If we
2033 * don't find any xattrs, we know there can't be any acls.
2034 *
2035 * slot is the slot the inode is in, objectid is the objectid of the inode
2036 */
2037 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2038 int slot, u64 objectid)
2039 {
2040 u32 nritems = btrfs_header_nritems(leaf);
2041 struct btrfs_key found_key;
2042 int scanned = 0;
2043
2044 slot++;
2045 while (slot < nritems) {
2046 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2047
2048 /* we found a different objectid, there must not be acls */
2049 if (found_key.objectid != objectid)
2050 return 0;
2051
2052 /* we found an xattr, assume we've got an acl */
2053 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2054 return 1;
2055
2056 /*
2057 * we found a key greater than an xattr key, there can't
2058 * be any acls later on
2059 */
2060 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2061 return 0;
2062
2063 slot++;
2064 scanned++;
2065
2066 /*
2067 * it goes inode, inode backrefs, xattrs, extents,
2068 * so if there are a ton of hard links to an inode there can
2069 * be a lot of backrefs. Don't waste time searching too hard,
2070 * this is just an optimization
2071 */
2072 if (scanned >= 8)
2073 break;
2074 }
2075 /* we hit the end of the leaf before we found an xattr or
2076 * something larger than an xattr. We have to assume the inode
2077 * has acls
2078 */
2079 return 1;
2080 }
2081
2082 /*
2083 * read an inode from the btree into the in-memory inode
2084 */
2085 static void btrfs_read_locked_inode(struct inode *inode)
2086 {
2087 struct btrfs_path *path;
2088 struct extent_buffer *leaf;
2089 struct btrfs_inode_item *inode_item;
2090 struct btrfs_timespec *tspec;
2091 struct btrfs_root *root = BTRFS_I(inode)->root;
2092 struct btrfs_key location;
2093 int maybe_acls;
2094 u64 alloc_group_block;
2095 u32 rdev;
2096 int ret;
2097
2098 path = btrfs_alloc_path();
2099 BUG_ON(!path);
2100 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2101
2102 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2103 if (ret)
2104 goto make_bad;
2105
2106 leaf = path->nodes[0];
2107 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2108 struct btrfs_inode_item);
2109
2110 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2111 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2112 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2113 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2114 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2115
2116 tspec = btrfs_inode_atime(inode_item);
2117 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2118 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2119
2120 tspec = btrfs_inode_mtime(inode_item);
2121 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2122 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2123
2124 tspec = btrfs_inode_ctime(inode_item);
2125 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2126 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2127
2128 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2129 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2130 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2131 inode->i_generation = BTRFS_I(inode)->generation;
2132 inode->i_rdev = 0;
2133 rdev = btrfs_inode_rdev(leaf, inode_item);
2134
2135 BTRFS_I(inode)->index_cnt = (u64)-1;
2136 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2137
2138 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2139
2140 /*
2141 * try to precache a NULL acl entry for files that don't have
2142 * any xattrs or acls
2143 */
2144 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2145 if (!maybe_acls)
2146 cache_no_acl(inode);
2147
2148 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2149 alloc_group_block, 0);
2150 btrfs_free_path(path);
2151 inode_item = NULL;
2152
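/*
 * now that the on-disk item has been copied in, wire up the
 * operations that match the file type
 */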
2153 switch (inode->i_mode & S_IFMT) {
2154 case S_IFREG:
2155 inode->i_mapping->a_ops = &btrfs_aops;
2156 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2157 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2158 inode->i_fop = &btrfs_file_operations;
2159 inode->i_op = &btrfs_file_inode_operations;
2160 break;
2161 case S_IFDIR:
2162 inode->i_fop = &btrfs_dir_file_operations;
2163 if (root == root->fs_info->tree_root)
2164 inode->i_op = &btrfs_dir_ro_inode_operations;
2165 else
2166 inode->i_op = &btrfs_dir_inode_operations;
2167 break;
2168 case S_IFLNK:
2169 inode->i_op = &btrfs_symlink_inode_operations;
2170 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2171 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2172 break;
2173 default:
2174 inode->i_op = &btrfs_special_inode_operations;
2175 init_special_inode(inode, inode->i_mode, rdev);
2176 break;
2177 }
2178
2179 btrfs_update_iflags(inode);
2180 return;
2181
2182 make_bad:
2183 btrfs_free_path(path);
2184 make_bad_inode(inode);
2185 }
2186
2187 /*
2188 * given a leaf and an inode, copy the inode fields into the leaf
2189 */
2190 static void fill_inode_item(struct btrfs_trans_handle *trans,
2191 struct extent_buffer *leaf,
2192 struct btrfs_inode_item *item,
2193 struct inode *inode)
2194 {
2195 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2196 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2197 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2198 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2199 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2200
2201 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2202 inode->i_atime.tv_sec);
2203 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2204 inode->i_atime.tv_nsec);
2205
2206 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2207 inode->i_mtime.tv_sec);
2208 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2209 inode->i_mtime.tv_nsec);
2210
2211 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2212 inode->i_ctime.tv_sec);
2213 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2214 inode->i_ctime.tv_nsec);
2215
2216 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2217 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2218 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2219 btrfs_set_inode_transid(leaf, item, trans->transid);
2220 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2221 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2222 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2223 }
2224
2225 /*
2226 * copy everything in the in-memory inode into the btree.
2227 */
2228 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2229 struct btrfs_root *root, struct inode *inode)
2230 {
2231 struct btrfs_inode_item *inode_item;
2232 struct btrfs_path *path;
2233 struct extent_buffer *leaf;
2234 int ret;
2235
2236 path = btrfs_alloc_path();
2237 BUG_ON(!path);
2238 path->leave_spinning = 1;
2239 ret = btrfs_lookup_inode(trans, root, path,
2240 &BTRFS_I(inode)->location, 1);
2241 if (ret) {
2242 if (ret > 0)
2243 ret = -ENOENT;
2244 goto failed;
2245 }
2246
2247 btrfs_unlock_up_safe(path, 1);
2248 leaf = path->nodes[0];
2249 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2250 struct btrfs_inode_item);
2251
2252 fill_inode_item(trans, leaf, inode_item, inode);
2253 btrfs_mark_buffer_dirty(leaf);
2254 btrfs_set_inode_last_trans(trans, inode);
2255 ret = 0;
2256 failed:
2257 btrfs_free_path(path);
2258 return ret;
2259 }
2260
2261
2262 /*
2263 * unlink helper that gets used here in inode.c and in the tree logging
2264 * recovery code. It removes a link in a directory with a given name, and
2265 * also drops the back ref from the inode to the directory
2266 */
2267 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2268 struct btrfs_root *root,
2269 struct inode *dir, struct inode *inode,
2270 const char *name, int name_len)
2271 {
2272 struct btrfs_path *path;
2273 int ret = 0;
2274 struct extent_buffer *leaf;
2275 struct btrfs_dir_item *di;
2276 struct btrfs_key key;
2277 u64 index;
2278
2279 path = btrfs_alloc_path();
2280 if (!path) {
2281 ret = -ENOMEM;
2282 goto err;
2283 }
2284
2285 path->leave_spinning = 1;
2286 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2287 name, name_len, -1);
2288 if (IS_ERR(di)) {
2289 ret = PTR_ERR(di);
2290 goto err;
2291 }
2292 if (!di) {
2293 ret = -ENOENT;
2294 goto err;
2295 }
2296 leaf = path->nodes[0];
2297 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2298 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2299 if (ret)
2300 goto err;
2301 btrfs_release_path(root, path);
2302
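/*
 * the dir item is gone.  Deleting the inode ref also hands back the
 * index number, which we need below to find and remove the matching
 * dir index item
 */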
2303 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2304 inode->i_ino,
2305 dir->i_ino, &index);
2306 if (ret) {
2307 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2308 "inode %lu parent %lu\n", name_len, name,
2309 inode->i_ino, dir->i_ino);
2310 goto err;
2311 }
2312
2313 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2314 index, name, name_len, -1);
2315 if (IS_ERR(di)) {
2316 ret = PTR_ERR(di);
2317 goto err;
2318 }
2319 if (!di) {
2320 ret = -ENOENT;
2321 goto err;
2322 }
2323 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2324 btrfs_release_path(root, path);
2325
2326 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2327 inode, dir->i_ino);
2328 BUG_ON(ret != 0 && ret != -ENOENT);
2329
2330 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2331 dir, index);
2332 BUG_ON(ret);
2333 err:
2334 btrfs_free_path(path);
2335 if (ret)
2336 goto out;
2337
2338 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2339 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2340 btrfs_update_inode(trans, root, dir);
2341 btrfs_drop_nlink(inode);
2342 ret = btrfs_update_inode(trans, root, inode);
2343 out:
2344 return ret;
2345 }
2346
2347 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2348 {
2349 struct btrfs_root *root;
2350 struct btrfs_trans_handle *trans;
2351 struct inode *inode = dentry->d_inode;
2352 int ret;
2353 unsigned long nr = 0;
2354
2355 root = BTRFS_I(dir)->root;
2356
2357 trans = btrfs_start_transaction(root, 1);
2358
2359 btrfs_set_trans_block_group(trans, dir);
2360
2361 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2362
2363 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2364 dentry->d_name.name, dentry->d_name.len);
2365
2366 if (inode->i_nlink == 0)
2367 ret = btrfs_orphan_add(trans, inode);
2368
2369 nr = trans->blocks_used;
2370
2371 btrfs_end_transaction_throttle(trans, root);
2372 btrfs_btree_balance_dirty(root, nr);
2373 return ret;
2374 }
2375
2376 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2377 {
2378 struct inode *inode = dentry->d_inode;
2379 int err = 0;
2380 int ret;
2381 struct btrfs_root *root = BTRFS_I(dir)->root;
2382 struct btrfs_trans_handle *trans;
2383 unsigned long nr = 0;
2384
2385 /*
2386 * the FIRST_FREE_OBJECTID check makes sure we don't try to rmdir
2387 * the root of a subvolume or snapshot
2388 */
2389 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2390 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
2391 return -ENOTEMPTY;
2392 }
2393
2394 trans = btrfs_start_transaction(root, 1);
2395 btrfs_set_trans_block_group(trans, dir);
2396
2397 err = btrfs_orphan_add(trans, inode);
2398 if (err)
2399 goto fail_trans;
2400
2401 /* now the directory is empty */
2402 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2403 dentry->d_name.name, dentry->d_name.len);
2404 if (!err)
2405 btrfs_i_size_write(inode, 0);
2406
2407 fail_trans:
2408 nr = trans->blocks_used;
2409 ret = btrfs_end_transaction_throttle(trans, root);
2410 btrfs_btree_balance_dirty(root, nr);
2411
2412 if (ret && !err)
2413 err = ret;
2414 return err;
2415 }
2416
2417 #if 0
2418 /*
2419 * when truncating bytes in a file, it is possible to avoid reading
2420 * the leaves that contain only checksum items. This can be the
2421 * majority of the IO required to delete a large file, but it must
2422 * be done carefully.
2423 *
2424 * The keys in the level just above the leaves are checked to make sure
2425 * the lowest key in a given leaf is a csum key, and starts at an offset
2426 * after the new size.
2427 *
2428 * Then the key for the next leaf is checked to make sure it also has
2429 * a checksum item for the same file. If it does, we know our target leaf
2430 * contains only checksum items, and it can be safely freed without reading
2431 * it.
2432 *
2433 * This is just an optimization targeted at large files. It may do
2434 * nothing. It will return 0 unless things went badly.
2435 */
2436 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2437 struct btrfs_root *root,
2438 struct btrfs_path *path,
2439 struct inode *inode, u64 new_size)
2440 {
2441 struct btrfs_key key;
2442 int ret;
2443 int nritems;
2444 struct btrfs_key found_key;
2445 struct btrfs_key other_key;
2446 struct btrfs_leaf_ref *ref;
2447 u64 leaf_gen;
2448 u64 leaf_start;
2449
2450 path->lowest_level = 1;
2451 key.objectid = inode->i_ino;
2452 key.type = BTRFS_CSUM_ITEM_KEY;
2453 key.offset = new_size;
2454 again:
2455 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2456 if (ret < 0)
2457 goto out;
2458
2459 if (path->nodes[1] == NULL) {
2460 ret = 0;
2461 goto out;
2462 }
2463 ret = 0;
2464 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2465 nritems = btrfs_header_nritems(path->nodes[1]);
2466
2467 if (!nritems)
2468 goto out;
2469
2470 if (path->slots[1] >= nritems)
2471 goto next_node;
2472
2473 /* did we find a key greater than anything we want to delete? */
2474 if (found_key.objectid > inode->i_ino ||
2475 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2476 goto out;
2477
2478 /* we check the next key in the node to make sure the leaf contains
2479 * only checksum items. This comparison doesn't work if our
2480 * leaf is the last one in the node
2481 */
2482 if (path->slots[1] + 1 >= nritems) {
2483 next_node:
2484 /* search forward from the last key in the node, this
2485 * will bring us into the next node in the tree
2486 */
2487 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2488
2489 /* unlikely, but we inc below, so check to be safe */
2490 if (found_key.offset == (u64)-1)
2491 goto out;
2492
2493 /* search_forward needs a path with locks held, do the
2494 * search again for the original key. It is possible
2495 * this will race with a balance and return a path that
2496 * we could modify, but this drop is just an optimization
2497 * and is allowed to miss some leaves.
2498 */
2499 btrfs_release_path(root, path);
2500 found_key.offset++;
2501
2502 /* setup a max key for search_forward */
2503 other_key.offset = (u64)-1;
2504 other_key.type = key.type;
2505 other_key.objectid = key.objectid;
2506
2507 path->keep_locks = 1;
2508 ret = btrfs_search_forward(root, &found_key, &other_key,
2509 path, 0, 0);
2510 path->keep_locks = 0;
2511 if (ret || found_key.objectid != key.objectid ||
2512 found_key.type != key.type) {
2513 ret = 0;
2514 goto out;
2515 }
2516
2517 key.offset = found_key.offset;
2518 btrfs_release_path(root, path);
2519 cond_resched();
2520 goto again;
2521 }
2522
2523 /* we know there's one more slot after us in the tree,
2524 * read that key so we can verify it is also a checksum item
2525 */
2526 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2527
2528 if (found_key.objectid < inode->i_ino)
2529 goto next_key;
2530
2531 if (found_key.type != key.type || found_key.offset < new_size)
2532 goto next_key;
2533
2534 /*
2535 * if the key for the next leaf isn't a csum key from this objectid,
2536 * we can't be sure there aren't good items inside this leaf.
2537 * Bail out
2538 */
2539 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2540 goto out;
2541
2542 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2543 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2544 /*
2545 * it is safe to delete this leaf, it contains only
2546 * csum items from this inode at an offset >= new_size
2547 */
2548 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2549 BUG_ON(ret);
2550
2551 if (root->ref_cows && leaf_gen < trans->transid) {
2552 ref = btrfs_alloc_leaf_ref(root, 0);
2553 if (ref) {
2554 ref->root_gen = root->root_key.offset;
2555 ref->bytenr = leaf_start;
2556 ref->owner = 0;
2557 ref->generation = leaf_gen;
2558 ref->nritems = 0;
2559
2560 btrfs_sort_leaf_ref(ref);
2561
2562 ret = btrfs_add_leaf_ref(root, ref, 0);
2563 WARN_ON(ret);
2564 btrfs_free_leaf_ref(root, ref);
2565 } else {
2566 WARN_ON(1);
2567 }
2568 }
2569 next_key:
2570 btrfs_release_path(root, path);
2571
2572 if (other_key.objectid == inode->i_ino &&
2573 other_key.type == key.type && other_key.offset > key.offset) {
2574 key.offset = other_key.offset;
2575 cond_resched();
2576 goto again;
2577 }
2578 ret = 0;
2579 out:
2580 /* fixup any changes we've made to the path */
2581 path->lowest_level = 0;
2582 path->keep_locks = 0;
2583 btrfs_release_path(root, path);
2584 return ret;
2585 }
2586
2587 #endif
2588
2589 /*
2590 * this can truncate away extent items, csum items and directory items.
2591 * It starts at a high offset and removes keys until it can't find
2592 * any higher than new_size
2593 *
2594 * csum items that cross the new i_size are truncated to the new size
2595 * as well.
2596 *
2597 * min_type is the minimum key type to truncate down to. If set to 0, this
2598 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2599 */
2600 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2601 struct btrfs_root *root,
2602 struct inode *inode,
2603 u64 new_size, u32 min_type)
2604 {
2605 int ret;
2606 struct btrfs_path *path;
2607 struct btrfs_key key;
2608 struct btrfs_key found_key;
2609 u32 found_type = (u8)-1;
2610 struct extent_buffer *leaf;
2611 struct btrfs_file_extent_item *fi;
2612 u64 extent_start = 0;
2613 u64 extent_num_bytes = 0;
2614 u64 extent_offset = 0;
2615 u64 item_end = 0;
2616 int found_extent;
2617 int del_item;
2618 int pending_del_nr = 0;
2619 int pending_del_slot = 0;
2620 int extent_type = -1;
2621 int encoding;
2622 u64 mask = root->sectorsize - 1;
2623
2624 if (root->ref_cows)
2625 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2626 path = btrfs_alloc_path();
2627 BUG_ON(!path);
2628 path->reada = -1;
2629
2630 /* FIXME, add redo link to tree so we don't leak on crash */
2631 key.objectid = inode->i_ino;
2632 key.offset = (u64)-1;
2633 key.type = (u8)-1;
2634
2635 search_again:
2636 path->leave_spinning = 1;
2637 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2638 if (ret < 0)
2639 goto error;
2640
2641 if (ret > 0) {
2642 /* there are no items in the tree for us to truncate, we're
2643 * done
2644 */
2645 if (path->slots[0] == 0) {
2646 ret = 0;
2647 goto error;
2648 }
2649 path->slots[0]--;
2650 }
2651
2652 while (1) {
2653 fi = NULL;
2654 leaf = path->nodes[0];
2655 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2656 found_type = btrfs_key_type(&found_key);
2657 encoding = 0;
2658
2659 if (found_key.objectid != inode->i_ino)
2660 break;
2661
2662 if (found_type < min_type)
2663 break;
2664
2665 item_end = found_key.offset;
2666 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2667 fi = btrfs_item_ptr(leaf, path->slots[0],
2668 struct btrfs_file_extent_item);
2669 extent_type = btrfs_file_extent_type(leaf, fi);
2670 encoding = btrfs_file_extent_compression(leaf, fi);
2671 encoding |= btrfs_file_extent_encryption(leaf, fi);
2672 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2673
2674 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2675 item_end +=
2676 btrfs_file_extent_num_bytes(leaf, fi);
2677 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2678 item_end += btrfs_file_extent_inline_len(leaf,
2679 fi);
2680 }
2681 item_end--;
2682 }
2683 if (item_end < new_size) {
2684 if (found_type == BTRFS_DIR_ITEM_KEY)
2685 found_type = BTRFS_INODE_ITEM_KEY;
2686 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2687 found_type = BTRFS_EXTENT_DATA_KEY;
2688 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2689 found_type = BTRFS_XATTR_ITEM_KEY;
2690 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2691 found_type = BTRFS_INODE_REF_KEY;
2692 else if (found_type)
2693 found_type--;
2694 else
2695 break;
2696 btrfs_set_key_type(&key, found_type);
2697 goto next;
2698 }
2699 if (found_key.offset >= new_size)
2700 del_item = 1;
2701 else
2702 del_item = 0;
2703 found_extent = 0;
2704
2705 /* FIXME, shrink the extent if the ref count is only 1 */
2706 if (found_type != BTRFS_EXTENT_DATA_KEY)
2707 goto delete;
2708
2709 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2710 u64 num_dec;
2711 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2712 if (!del_item && !encoding) {
2713 u64 orig_num_bytes =
2714 btrfs_file_extent_num_bytes(leaf, fi);
2715 extent_num_bytes = new_size -
2716 found_key.offset + root->sectorsize - 1;
2717 extent_num_bytes = extent_num_bytes &
2718 ~((u64)root->sectorsize - 1);
2719 btrfs_set_file_extent_num_bytes(leaf, fi,
2720 extent_num_bytes);
2721 num_dec = (orig_num_bytes -
2722 extent_num_bytes);
2723 if (root->ref_cows && extent_start != 0)
2724 inode_sub_bytes(inode, num_dec);
2725 btrfs_mark_buffer_dirty(leaf);
2726 } else {
2727 extent_num_bytes =
2728 btrfs_file_extent_disk_num_bytes(leaf,
2729 fi);
2730 extent_offset = found_key.offset -
2731 btrfs_file_extent_offset(leaf, fi);
2732
2733 /* FIXME blocksize != 4096 */
2734 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2735 if (extent_start != 0) {
2736 found_extent = 1;
2737 if (root->ref_cows)
2738 inode_sub_bytes(inode, num_dec);
2739 }
2740 }
2741 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
2742 /*
2743 * we can't truncate inline items that have had
2744 * special encodings
2745 */
2746 if (!del_item &&
2747 btrfs_file_extent_compression(leaf, fi) == 0 &&
2748 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2749 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2750 u32 size = new_size - found_key.offset;
2751
2752 if (root->ref_cows) {
2753 inode_sub_bytes(inode, item_end + 1 -
2754 new_size);
2755 }
2756 size =
2757 btrfs_file_extent_calc_inline_size(size);
2758 ret = btrfs_truncate_item(trans, root, path,
2759 size, 1);
2760 BUG_ON(ret);
2761 } else if (root->ref_cows) {
2762 inode_sub_bytes(inode, item_end + 1 -
2763 found_key.offset);
2764 }
2765 }
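/*
 * deletions are batched: contiguous slots are collected in
 * pending_del_slot/pending_del_nr and removed together with
 * btrfs_del_items
 */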
2766 delete:
2767 if (del_item) {
2768 if (!pending_del_nr) {
2769 /* no pending yet, add ourselves */
2770 pending_del_slot = path->slots[0];
2771 pending_del_nr = 1;
2772 } else if (pending_del_nr &&
2773 path->slots[0] + 1 == pending_del_slot) {
2774 /* hop on the pending chunk */
2775 pending_del_nr++;
2776 pending_del_slot = path->slots[0];
2777 } else {
2778 BUG();
2779 }
2780 } else {
2781 break;
2782 }
2783 if (found_extent && root->ref_cows) {
2784 btrfs_set_path_blocking(path);
2785 ret = btrfs_free_extent(trans, root, extent_start,
2786 extent_num_bytes, 0,
2787 btrfs_header_owner(leaf),
2788 inode->i_ino, extent_offset);
2789 BUG_ON(ret);
2790 }
2791 next:
2792 if (path->slots[0] == 0) {
2793 if (pending_del_nr)
2794 goto del_pending;
2795 btrfs_release_path(root, path);
2796 if (found_type == BTRFS_INODE_ITEM_KEY)
2797 break;
2798 goto search_again;
2799 }
2800
2801 path->slots[0]--;
2802 if (pending_del_nr &&
2803 path->slots[0] + 1 != pending_del_slot) {
2804 struct btrfs_key debug;
2805 del_pending:
2806 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2807 pending_del_slot);
2808 ret = btrfs_del_items(trans, root, path,
2809 pending_del_slot,
2810 pending_del_nr);
2811 BUG_ON(ret);
2812 pending_del_nr = 0;
2813 btrfs_release_path(root, path);
2814 if (found_type == BTRFS_INODE_ITEM_KEY)
2815 break;
2816 goto search_again;
2817 }
2818 }
2819 ret = 0;
2820 error:
2821 if (pending_del_nr) {
2822 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2823 pending_del_nr);
2824 }
2825 btrfs_free_path(path);
2826 return ret;
2827 }
2828
2829 /*
2830 * taken from block_truncate_page, but does COW as it zeroes out
2831 * any bytes left in the last page in the file.
2832 */
2833 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
2834 {
2835 struct inode *inode = mapping->host;
2836 struct btrfs_root *root = BTRFS_I(inode)->root;
2837 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2838 struct btrfs_ordered_extent *ordered;
2839 char *kaddr;
2840 u32 blocksize = root->sectorsize;
2841 pgoff_t index = from >> PAGE_CACHE_SHIFT;
2842 unsigned offset = from & (PAGE_CACHE_SIZE-1);
2843 struct page *page;
2844 int ret = 0;
2845 u64 page_start;
2846 u64 page_end;
2847
2848 if ((offset & (blocksize - 1)) == 0)
2849 goto out;
2850
2851 ret = -ENOMEM;
2852 again:
2853 page = grab_cache_page(mapping, index);
2854 if (!page)
2855 goto out;
2856
2857 page_start = page_offset(page);
2858 page_end = page_start + PAGE_CACHE_SIZE - 1;
2859
2860 if (!PageUptodate(page)) {
2861 ret = btrfs_readpage(NULL, page);
2862 lock_page(page);
2863 if (page->mapping != mapping) {
2864 unlock_page(page);
2865 page_cache_release(page);
2866 goto again;
2867 }
2868 if (!PageUptodate(page)) {
2869 ret = -EIO;
2870 goto out_unlock;
2871 }
2872 }
2873 wait_on_page_writeback(page);
2874
2875 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2876 set_page_extent_mapped(page);
2877
2878 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2879 if (ordered) {
2880 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2881 unlock_page(page);
2882 page_cache_release(page);
2883 btrfs_start_ordered_extent(inode, ordered, 1);
2884 btrfs_put_ordered_extent(ordered);
2885 goto again;
2886 }
2887
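/*
 * no ordered extent is pending, so mark the range delalloc and
 * zero the part of the page from the truncation offset onward
 */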
2888 btrfs_set_extent_delalloc(inode, page_start, page_end);
2889 ret = 0;
2890 if (offset != PAGE_CACHE_SIZE) {
2891 kaddr = kmap(page);
2892 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
2893 flush_dcache_page(page);
2894 kunmap(page);
2895 }
2896 ClearPageChecked(page);
2897 set_page_dirty(page);
2898 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2899
2900 out_unlock:
2901 unlock_page(page);
2902 page_cache_release(page);
2903 out:
2904 return ret;
2905 }
2906
2907 int btrfs_cont_expand(struct inode *inode, loff_t size)
2908 {
2909 struct btrfs_trans_handle *trans;
2910 struct btrfs_root *root = BTRFS_I(inode)->root;
2911 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2912 struct extent_map *em;
2913 u64 mask = root->sectorsize - 1;
2914 u64 hole_start = (inode->i_size + mask) & ~mask;
2915 u64 block_end = (size + mask) & ~mask;
2916 u64 last_byte;
2917 u64 cur_offset;
2918 u64 hole_size;
2919 int err;
2920
2921 if (size <= hole_start)
2922 return 0;
2923
2924 err = btrfs_check_metadata_free_space(root);
2925 if (err)
2926 return err;
2927
2928 btrfs_truncate_page(inode->i_mapping, inode->i_size);
2929
2930 while (1) {
2931 struct btrfs_ordered_extent *ordered;
2932 btrfs_wait_ordered_range(inode, hole_start,
2933 block_end - hole_start);
2934 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2935 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
2936 if (!ordered)
2937 break;
2938 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2939 btrfs_put_ordered_extent(ordered);
2940 }
2941
2942 trans = btrfs_start_transaction(root, 1);
2943 btrfs_set_trans_block_group(trans, inode);
2944
2945 cur_offset = hole_start;
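/*
 * walk from the old aligned i_size to the new end.  Wherever the
 * extent map reports a vacancy, drop the cached extents for that
 * range and insert a file extent item to cover the hole
 */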
2946 while (1) {
2947 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
2948 block_end - cur_offset, 0);
2949 BUG_ON(IS_ERR(em) || !em);
2950 last_byte = min(extent_map_end(em), block_end);
2951 last_byte = (last_byte + mask) & ~mask;
2952 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
2953 u64 hint_byte = 0;
2954 hole_size = last_byte - cur_offset;
2955 err = btrfs_drop_extents(trans, root, inode,
2956 cur_offset,
2957 cur_offset + hole_size,
2958 block_end,
2959 cur_offset, &hint_byte, 1);
2960 if (err)
2961 break;
2962 err = btrfs_insert_file_extent(trans, root,
2963 inode->i_ino, cur_offset, 0,
2964 0, hole_size, 0, hole_size,
2965 0, 0, 0);
2966 btrfs_drop_extent_cache(inode, hole_start,
2967 last_byte - 1, 0);
2968 }
2969 free_extent_map(em);
2970 cur_offset = last_byte;
2971 if (err || cur_offset >= block_end)
2972 break;
2973 }
2974
2975 btrfs_end_transaction(trans, root);
2976 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
2977 return err;
2978 }
2979
2980 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
2981 {
2982 struct inode *inode = dentry->d_inode;
2983 int err;
2984
2985 err = inode_change_ok(inode, attr);
2986 if (err)
2987 return err;
2988
2989 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
2990 if (attr->ia_size > inode->i_size) {
2991 err = btrfs_cont_expand(inode, attr->ia_size);
2992 if (err)
2993 return err;
2994 } else if (inode->i_size > 0 &&
2995 attr->ia_size == 0) {
2996
2997 /* we're truncating a file that used to have good
2998 * data down to zero. Make sure it gets into
2999 * the ordered flush list so that any new writes
3000 * get down to disk quickly.
3001 */
3002 BTRFS_I(inode)->ordered_data_close = 1;
3003 }
3004 }
3005
3006 err = inode_setattr(inode, attr);
3007
3008 if (!err && ((attr->ia_valid & ATTR_MODE)))
3009 err = btrfs_acl_chmod(inode);
3010 return err;
3011 }
3012
3013 void btrfs_delete_inode(struct inode *inode)
3014 {
3015 struct btrfs_trans_handle *trans;
3016 struct btrfs_root *root = BTRFS_I(inode)->root;
3017 unsigned long nr;
3018 int ret;
3019
3020 truncate_inode_pages(&inode->i_data, 0);
3021 if (is_bad_inode(inode)) {
3022 btrfs_orphan_del(NULL, inode);
3023 goto no_delete;
3024 }
3025 btrfs_wait_ordered_range(inode, 0, (u64)-1);
3026
3027 btrfs_i_size_write(inode, 0);
3028 trans = btrfs_join_transaction(root, 1);
3029
3030 btrfs_set_trans_block_group(trans, inode);
3031 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
3032 if (ret) {
3033 btrfs_orphan_del(NULL, inode);
3034 goto no_delete_lock;
3035 }
3036
3037 btrfs_orphan_del(trans, inode);
3038
3039 nr = trans->blocks_used;
3040 clear_inode(inode);
3041
3042 btrfs_end_transaction(trans, root);
3043 btrfs_btree_balance_dirty(root, nr);
3044 return;
3045
3046 no_delete_lock:
3047 nr = trans->blocks_used;
3048 btrfs_end_transaction(trans, root);
3049 btrfs_btree_balance_dirty(root, nr);
3050 no_delete:
3051 clear_inode(inode);
3052 }
3053
3054 /*
3055 * this returns the key found in the dir entry in the location pointer.
3056 * If no dir entries were found, location->objectid is 0.
3057 */
3058 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3059 struct btrfs_key *location)
3060 {
3061 const char *name = dentry->d_name.name;
3062 int namelen = dentry->d_name.len;
3063 struct btrfs_dir_item *di;
3064 struct btrfs_path *path;
3065 struct btrfs_root *root = BTRFS_I(dir)->root;
3066 int ret = 0;
3067
3068 path = btrfs_alloc_path();
3069 BUG_ON(!path);
3070
3071 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3072 namelen, 0);
3073 if (IS_ERR(di))
3074 ret = PTR_ERR(di);
3075
3076 if (!di || IS_ERR(di))
3077 goto out_err;
3078
3079 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3080 out:
3081 btrfs_free_path(path);
3082 return ret;
3083 out_err:
3084 location->objectid = 0;
3085 goto out;
3086 }
3087
3088 /*
3089 * when we hit a tree root in a directory, the btrfs part of the inode
3090 * needs to be changed to reflect the root directory of the tree root. This
3091 * is kind of like crossing a mount point.
3092 */
3093 static int fixup_tree_root_location(struct btrfs_root *root,
3094 struct btrfs_key *location,
3095 struct btrfs_root **sub_root,
3096 struct dentry *dentry)
3097 {
3098 struct btrfs_root_item *ri;
3099
3100 if (btrfs_key_type(location) != BTRFS_ROOT_ITEM_KEY)
3101 return 0;
3102 if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
3103 return 0;
3104
3105 *sub_root = btrfs_read_fs_root(root->fs_info, location,
3106 dentry->d_name.name,
3107 dentry->d_name.len);
3108 if (IS_ERR(*sub_root))
3109 return PTR_ERR(*sub_root);
3110
3111 ri = &(*sub_root)->root_item;
3112 location->objectid = btrfs_root_dirid(ri);
3113 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3114 location->offset = 0;
3115
3116 return 0;
3117 }
3118
3119 static void inode_tree_add(struct inode *inode)
3120 {
3121 struct btrfs_root *root = BTRFS_I(inode)->root;
3122 struct btrfs_inode *entry;
3123 struct rb_node **p;
3124 struct rb_node *parent;
3125
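/*
 * look for our slot in the root's rb-tree of in-memory inodes,
 * keyed by inode number.  If we collide with an inode that is
 * being freed, unhook it and retry the insert
 */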
3126 again:
3127 p = &root->inode_tree.rb_node;
3128 parent = NULL;
3129
3130 spin_lock(&root->inode_lock);
3131 while (*p) {
3132 parent = *p;
3133 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3134
3135 if (inode->i_ino < entry->vfs_inode.i_ino)
3136 p = &parent->rb_left;
3137 else if (inode->i_ino > entry->vfs_inode.i_ino)
3138 p = &parent->rb_right;
3139 else {
3140 WARN_ON(!(entry->vfs_inode.i_state &
3141 (I_WILL_FREE | I_FREEING | I_CLEAR)));
3142 rb_erase(parent, &root->inode_tree);
3143 RB_CLEAR_NODE(parent);
3144 spin_unlock(&root->inode_lock);
3145 goto again;
3146 }
3147 }
3148 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3149 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3150 spin_unlock(&root->inode_lock);
3151 }
3152
3153 static void inode_tree_del(struct inode *inode)
3154 {
3155 struct btrfs_root *root = BTRFS_I(inode)->root;
3156
3157 spin_lock(&root->inode_lock);
3158 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3159 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3160 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3161 }
3162 spin_unlock(&root->inode_lock);
3163 }
3164
3165 static noinline void init_btrfs_i(struct inode *inode)
3166 {
3167 struct btrfs_inode *bi = BTRFS_I(inode);
3168
3169 bi->generation = 0;
3170 bi->sequence = 0;
3171 bi->last_trans = 0;
3172 bi->logged_trans = 0;
3173 bi->delalloc_bytes = 0;
3174 bi->reserved_bytes = 0;
3175 bi->disk_i_size = 0;
3176 bi->flags = 0;
3177 bi->index_cnt = (u64)-1;
3178 bi->last_unlink_trans = 0;
3179 bi->ordered_data_close = 0;
3180 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3181 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3182 inode->i_mapping, GFP_NOFS);
3183 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3184 inode->i_mapping, GFP_NOFS);
3185 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3186 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3187 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3188 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3189 mutex_init(&BTRFS_I(inode)->extent_mutex);
3190 mutex_init(&BTRFS_I(inode)->log_mutex);
3191 }
3192
3193 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3194 {
3195 struct btrfs_iget_args *args = p;
3196 inode->i_ino = args->ino;
3197 init_btrfs_i(inode);
3198 BTRFS_I(inode)->root = args->root;
3199 btrfs_set_inode_space_info(args->root, inode);
3200 return 0;
3201 }
3202
3203 static int btrfs_find_actor(struct inode *inode, void *opaque)
3204 {
3205 struct btrfs_iget_args *args = opaque;
3206 return args->ino == inode->i_ino &&
3207 args->root == BTRFS_I(inode)->root;
3208 }
3209
3210 static struct inode *btrfs_iget_locked(struct super_block *s,
3211 u64 objectid,
3212 struct btrfs_root *root)
3213 {
3214 struct inode *inode;
3215 struct btrfs_iget_args args;
3216 args.ino = objectid;
3217 args.root = root;
3218
3219 inode = iget5_locked(s, objectid, btrfs_find_actor,
3220 btrfs_init_locked_inode,
3221 (void *)&args);
3222 return inode;
3223 }
3224
3225 /* Get an inode object given its location and corresponding root.
3226 * The inode is read from disk if it is not already cached in memory.
3227 */
3228 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3229 struct btrfs_root *root)
3230 {
3231 struct inode *inode;
3232
3233 inode = btrfs_iget_locked(s, location->objectid, root);
3234 if (!inode)
3235 return ERR_PTR(-ENOMEM);
3236
3237 if (inode->i_state & I_NEW) {
3238 BTRFS_I(inode)->root = root;
3239 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3240 btrfs_read_locked_inode(inode);
3241
3242 inode_tree_add(inode);
3243 unlock_new_inode(inode);
3244 }
3245
3246 return inode;
3247 }
3248
3249 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3250 {
3251 struct inode *inode;
3252 struct btrfs_inode *bi = BTRFS_I(dir);
3253 struct btrfs_root *root = bi->root;
3254 struct btrfs_root *sub_root = root;
3255 struct btrfs_key location;
3256 int ret;
3257
3258 if (dentry->d_name.len > BTRFS_NAME_LEN)
3259 return ERR_PTR(-ENAMETOOLONG);
3260
3261 ret = btrfs_inode_by_name(dir, dentry, &location);
3262
3263 if (ret < 0)
3264 return ERR_PTR(ret);
3265
3266 inode = NULL;
3267 if (location.objectid) {
3268 ret = fixup_tree_root_location(root, &location, &sub_root,
3269 dentry);
3270 if (ret < 0)
3271 return ERR_PTR(ret);
3272 if (ret > 0)
3273 return ERR_PTR(-ENOENT);
3274 inode = btrfs_iget(dir->i_sb, &location, sub_root);
3275 if (IS_ERR(inode))
3276 return ERR_CAST(inode);
3277 }
3278 return inode;
3279 }
3280
3281 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3282 struct nameidata *nd)
3283 {
3284 struct inode *inode;
3285
3286 if (dentry->d_name.len > BTRFS_NAME_LEN)
3287 return ERR_PTR(-ENAMETOOLONG);
3288
3289 inode = btrfs_lookup_dentry(dir, dentry);
3290 if (IS_ERR(inode))
3291 return ERR_CAST(inode);
3292
3293 return d_splice_alias(inode, dentry);
3294 }
3295
3296 static unsigned char btrfs_filetype_table[] = {
3297 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3298 };
3299
3300 static int btrfs_real_readdir(struct file *filp, void *dirent,
3301 filldir_t filldir)
3302 {
3303 struct inode *inode = filp->f_dentry->d_inode;
3304 struct btrfs_root *root = BTRFS_I(inode)->root;
3305 struct btrfs_item *item;
3306 struct btrfs_dir_item *di;
3307 struct btrfs_key key;
3308 struct btrfs_key found_key;
3309 struct btrfs_path *path;
3310 int ret;
3311 u32 nritems;
3312 struct extent_buffer *leaf;
3313 int slot;
3314 int advance;
3315 unsigned char d_type;
3316 int over = 0;
3317 u32 di_cur;
3318 u32 di_total;
3319 u32 di_len;
3320 int key_type = BTRFS_DIR_INDEX_KEY;
3321 char tmp_name[32];
3322 char *name_ptr;
3323 int name_len;
3324
3325 /* FIXME, use a real flag for deciding about the key type */
3326 if (root->fs_info->tree_root == root)
3327 key_type = BTRFS_DIR_ITEM_KEY;
3328
3329 /* special case for "." */
3330 if (filp->f_pos == 0) {
3331 over = filldir(dirent, ".", 1,
3332 1, inode->i_ino,
3333 DT_DIR);
3334 if (over)
3335 return 0;
3336 filp->f_pos = 1;
3337 }
3338 /* special case for .., just use the back ref */
3339 if (filp->f_pos == 1) {
3340 u64 pino = parent_ino(filp->f_path.dentry);
3341 over = filldir(dirent, "..", 2,
3342 2, pino, DT_DIR);
3343 if (over)
3344 return 0;
3345 filp->f_pos = 2;
3346 }
3347 path = btrfs_alloc_path();
3348 path->reada = 2;
3349
3350 btrfs_set_key_type(&key, key_type);
3351 key.offset = filp->f_pos;
3352 key.objectid = inode->i_ino;
3353
3354 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3355 if (ret < 0)
3356 goto err;
3357 advance = 0;
3358
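/*
 * walk forward through this inode's dir index (or dir item) keys,
 * emitting one entry for each btrfs_dir_item packed into the items
 * we find
 */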
3359 while (1) {
3360 leaf = path->nodes[0];
3361 nritems = btrfs_header_nritems(leaf);
3362 slot = path->slots[0];
3363 if (advance || slot >= nritems) {
3364 if (slot >= nritems - 1) {
3365 ret = btrfs_next_leaf(root, path);
3366 if (ret)
3367 break;
3368 leaf = path->nodes[0];
3369 nritems = btrfs_header_nritems(leaf);
3370 slot = path->slots[0];
3371 } else {
3372 slot++;
3373 path->slots[0]++;
3374 }
3375 }
3376
3377 advance = 1;
3378 item = btrfs_item_nr(leaf, slot);
3379 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3380
3381 if (found_key.objectid != key.objectid)
3382 break;
3383 if (btrfs_key_type(&found_key) != key_type)
3384 break;
3385 if (found_key.offset < filp->f_pos)
3386 continue;
3387
3388 filp->f_pos = found_key.offset;
3389
3390 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3391 di_cur = 0;
3392 di_total = btrfs_item_size(leaf, item);
3393
3394 while (di_cur < di_total) {
3395 struct btrfs_key location;
3396
3397 name_len = btrfs_dir_name_len(leaf, di);
3398 if (name_len <= sizeof(tmp_name)) {
3399 name_ptr = tmp_name;
3400 } else {
3401 name_ptr = kmalloc(name_len, GFP_NOFS);
3402 if (!name_ptr) {
3403 ret = -ENOMEM;
3404 goto err;
3405 }
3406 }
3407 read_extent_buffer(leaf, name_ptr,
3408 (unsigned long)(di + 1), name_len);
3409
3410 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3411 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3412
3413 /* is this a reference to our own snapshot? If so
3414 * skip it
3415 */
3416 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3417 location.objectid == root->root_key.objectid) {
3418 over = 0;
3419 goto skip;
3420 }
3421 over = filldir(dirent, name_ptr, name_len,
3422 found_key.offset, location.objectid,
3423 d_type);
3424
3425 skip:
3426 if (name_ptr != tmp_name)
3427 kfree(name_ptr);
3428
3429 if (over)
3430 goto nopos;
3431 di_len = btrfs_dir_name_len(leaf, di) +
3432 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3433 di_cur += di_len;
3434 di = (struct btrfs_dir_item *)((char *)di + di_len);
3435 }
3436 }
3437
3438 /* Reached end of directory/root. Bump pos past the last item. */
3439 if (key_type == BTRFS_DIR_INDEX_KEY)
3440 filp->f_pos = INT_LIMIT(off_t);
3441 else
3442 filp->f_pos++;
3443 nopos:
3444 ret = 0;
3445 err:
3446 btrfs_free_path(path);
3447 return ret;
3448 }
3449
3450 int btrfs_write_inode(struct inode *inode, int wait)
3451 {
3452 struct btrfs_root *root = BTRFS_I(inode)->root;
3453 struct btrfs_trans_handle *trans;
3454 int ret = 0;
3455
3456 if (root->fs_info->btree_inode == inode)
3457 return 0;
3458
3459 if (wait) {
3460 trans = btrfs_join_transaction(root, 1);
3461 btrfs_set_trans_block_group(trans, inode);
3462 ret = btrfs_commit_transaction(trans, root);
3463 }
3464 return ret;
3465 }
3466
3467 /*
3468 * This is somewhat expensive, updating the tree every time the
3469 * inode changes. But it is most likely to find the inode in cache.
3470 * FIXME: needs more benchmarking; there are no reasons other than performance
3471 * to keep or drop this code.
3472 */
3473 void btrfs_dirty_inode(struct inode *inode)
3474 {
3475 struct btrfs_root *root = BTRFS_I(inode)->root;
3476 struct btrfs_trans_handle *trans;
3477
3478 trans = btrfs_join_transaction(root, 1);
3479 btrfs_set_trans_block_group(trans, inode);
3480 btrfs_update_inode(trans, root, inode);
3481 btrfs_end_transaction(trans, root);
3482 }
3483
3484 /*
3485 * find the highest existing sequence number in a directory
3486 * and then set the in-memory index_cnt variable to the next
3487 * free sequence number
3488 */
3489 static int btrfs_set_inode_index_count(struct inode *inode)
3490 {
3491 struct btrfs_root *root = BTRFS_I(inode)->root;
3492 struct btrfs_key key, found_key;
3493 struct btrfs_path *path;
3494 struct extent_buffer *leaf;
3495 int ret;
3496
3497 key.objectid = inode->i_ino;
3498 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3499 key.offset = (u64)-1;
3500
3501 path = btrfs_alloc_path();
3502 if (!path)
3503 return -ENOMEM;
3504
3505 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3506 if (ret < 0)
3507 goto out;
3508 /* FIXME: we should be able to handle this */
3509 if (ret == 0)
3510 goto out;
3511 ret = 0;
3512
3513 /*
3514 * MAGIC NUMBER EXPLANATION:
3515 * since we search a directory based on f_pos, we have to start at 2:
3516 * '.' and '..' have f_pos of 0 and 1 respectively, so everybody
3517 * else has to start at 2
3518 */
3519 if (path->slots[0] == 0) {
3520 BTRFS_I(inode)->index_cnt = 2;
3521 goto out;
3522 }
3523
3524 path->slots[0]--;
3525
3526 leaf = path->nodes[0];
3527 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3528
3529 if (found_key.objectid != inode->i_ino ||
3530 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3531 BTRFS_I(inode)->index_cnt = 2;
3532 goto out;
3533 }
3534
3535 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3536 out:
3537 btrfs_free_path(path);
3538 return ret;
3539 }
3540
3541 /*
3542 * helper to find a free sequence number in a given directory. The current
3543 * code is very simple; later versions will do smarter things in the btree
3544 */
3545 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3546 {
3547 int ret = 0;
3548
3549 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3550 ret = btrfs_set_inode_index_count(dir);
3551 if (ret)
3552 return ret;
3553 }
3554
3555 *index = BTRFS_I(dir)->index_cnt;
3556 BTRFS_I(dir)->index_cnt++;
3557
3558 return ret;
3559 }
3560
3561 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3562 struct btrfs_root *root,
3563 struct inode *dir,
3564 const char *name, int name_len,
3565 u64 ref_objectid, u64 objectid,
3566 u64 alloc_hint, int mode, u64 *index)
3567 {
3568 struct inode *inode;
3569 struct btrfs_inode_item *inode_item;
3570 struct btrfs_key *location;
3571 struct btrfs_path *path;
3572 struct btrfs_inode_ref *ref;
3573 struct btrfs_key key[2];
3574 u32 sizes[2];
3575 unsigned long ptr;
3576 int ret;
3577 int owner;
3578
3579 path = btrfs_alloc_path();
3580 BUG_ON(!path);
3581
3582 inode = new_inode(root->fs_info->sb);
3583 if (!inode)
3584 return ERR_PTR(-ENOMEM);
3585
3586 if (dir) {
3587 ret = btrfs_set_inode_index(dir, index);
3588 if (ret) {
3589 iput(inode);
3590 return ERR_PTR(ret);
3591 }
3592 }
3593 /*
3594 * index_cnt is ignored for everything but a dir,
3595 * btrfs_set_inode_index_count has an explanation for the magic
3596 * number
3597 */
3598 init_btrfs_i(inode);
3599 BTRFS_I(inode)->index_cnt = 2;
3600 BTRFS_I(inode)->root = root;
3601 BTRFS_I(inode)->generation = trans->transid;
3602 btrfs_set_inode_space_info(root, inode);
3603
3604 if (mode & S_IFDIR)
3605 owner = 0;
3606 else
3607 owner = 1;
3608 BTRFS_I(inode)->block_group =
3609 btrfs_find_block_group(root, 0, alloc_hint, owner);
3610
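/*
 * set up two keys, one for the inode item and one for the inode
 * ref back to the parent, so both can be inserted with a single
 * btrfs_insert_empty_items call
 */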
3611 key[0].objectid = objectid;
3612 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3613 key[0].offset = 0;
3614
3615 key[1].objectid = objectid;
3616 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3617 key[1].offset = ref_objectid;
3618
3619 sizes[0] = sizeof(struct btrfs_inode_item);
3620 sizes[1] = name_len + sizeof(*ref);
3621
3622 path->leave_spinning = 1;
3623 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3624 if (ret != 0)
3625 goto fail;
3626
3627 inode->i_uid = current_fsuid();
3628
3629 if (dir && (dir->i_mode & S_ISGID)) {
3630 inode->i_gid = dir->i_gid;
3631 if (S_ISDIR(mode))
3632 mode |= S_ISGID;
3633 } else
3634 inode->i_gid = current_fsgid();
3635
3636 inode->i_mode = mode;
3637 inode->i_ino = objectid;
3638 inode_set_bytes(inode, 0);
3639 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3640 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3641 struct btrfs_inode_item);
3642 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3643
3644 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3645 struct btrfs_inode_ref);
3646 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3647 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3648 ptr = (unsigned long)(ref + 1);
3649 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3650
3651 btrfs_mark_buffer_dirty(path->nodes[0]);
3652 btrfs_free_path(path);
3653
3654 location = &BTRFS_I(inode)->location;
3655 location->objectid = objectid;
3656 location->offset = 0;
3657 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
3658
3659 btrfs_inherit_iflags(inode, dir);
3660
3661 if ((mode & S_IFREG)) {
3662 if (btrfs_test_opt(root, NODATASUM))
3663 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
3664 if (btrfs_test_opt(root, NODATACOW))
3665 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
3666 }
3667
3668 insert_inode_hash(inode);
3669 inode_tree_add(inode);
3670 return inode;
3671 fail:
3672 if (dir)
3673 BTRFS_I(dir)->index_cnt--;
3674 btrfs_free_path(path);
3675 iput(inode);
3676 return ERR_PTR(ret);
3677 }
3678
3679 static inline u8 btrfs_inode_type(struct inode *inode)
3680 {
3681 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
3682 }
3683
3684 /*
3685 * utility function to add 'inode' into 'parent_inode' with
3686 * a given name and a given sequence number.
3687 * If 'add_backref' is true, also insert a backref from the
3688 * inode to the parent directory.
3689 */
3690 int btrfs_add_link(struct btrfs_trans_handle *trans,
3691 struct inode *parent_inode, struct inode *inode,
3692 const char *name, int name_len, int add_backref, u64 index)
3693 {
3694 int ret;
3695 struct btrfs_key key;
3696 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
3697
3698 key.objectid = inode->i_ino;
3699 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
3700 key.offset = 0;
3701
3702 ret = btrfs_insert_dir_item(trans, root, name, name_len,
3703 parent_inode->i_ino,
3704 &key, btrfs_inode_type(inode),
3705 index);
3706 if (ret == 0) {
3707 if (add_backref) {
3708 ret = btrfs_insert_inode_ref(trans, root,
3709 name, name_len,
3710 inode->i_ino,
3711 parent_inode->i_ino,
3712 index);
3713 }
3714 btrfs_i_size_write(parent_inode, parent_inode->i_size +
3715 name_len * 2);
3716 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
3717 ret = btrfs_update_inode(trans, root, parent_inode);
3718 }
3719 return ret;
3720 }
3721
3722 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
3723 struct dentry *dentry, struct inode *inode,
3724 int backref, u64 index)
3725 {
3726 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3727 inode, dentry->d_name.name,
3728 dentry->d_name.len, backref, index);
3729 if (!err) {
3730 d_instantiate(dentry, inode);
3731 return 0;
3732 }
3733 if (err > 0)
3734 err = -EEXIST;
3735 return err;
3736 }
3737
3738 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
3739 int mode, dev_t rdev)
3740 {
3741 struct btrfs_trans_handle *trans;
3742 struct btrfs_root *root = BTRFS_I(dir)->root;
3743 struct inode *inode = NULL;
3744 int err;
3745 int drop_inode = 0;
3746 u64 objectid;
3747 unsigned long nr = 0;
3748 u64 index = 0;
3749
3750 if (!new_valid_dev(rdev))
3751 return -EINVAL;
3752
3753 err = btrfs_check_metadata_free_space(root);
3754 if (err)
3755 goto fail;
3756
3757 trans = btrfs_start_transaction(root, 1);
3758 btrfs_set_trans_block_group(trans, dir);
3759
3760 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3761 if (err) {
3762 err = -ENOSPC;
3763 goto out_unlock;
3764 }
3765
3766 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3767 dentry->d_name.len,
3768 dentry->d_parent->d_inode->i_ino, objectid,
3769 BTRFS_I(dir)->block_group, mode, &index);
3770 err = PTR_ERR(inode);
3771 if (IS_ERR(inode))
3772 goto out_unlock;
3773
3774 err = btrfs_init_inode_security(inode, dir);
3775 if (err) {
3776 drop_inode = 1;
3777 goto out_unlock;
3778 }
3779
3780 btrfs_set_trans_block_group(trans, inode);
3781 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3782 if (err)
3783 drop_inode = 1;
3784 else {
3785 inode->i_op = &btrfs_special_inode_operations;
3786 init_special_inode(inode, inode->i_mode, rdev);
3787 btrfs_update_inode(trans, root, inode);
3788 }
3789 btrfs_update_inode_block_group(trans, inode);
3790 btrfs_update_inode_block_group(trans, dir);
3791 out_unlock:
3792 nr = trans->blocks_used;
3793 btrfs_end_transaction_throttle(trans, root);
3794 fail:
3795 if (drop_inode) {
3796 inode_dec_link_count(inode);
3797 iput(inode);
3798 }
3799 btrfs_btree_balance_dirty(root, nr);
3800 return err;
3801 }
3802
3803 static int btrfs_create(struct inode *dir, struct dentry *dentry,
3804 int mode, struct nameidata *nd)
3805 {
3806 struct btrfs_trans_handle *trans;
3807 struct btrfs_root *root = BTRFS_I(dir)->root;
3808 struct inode *inode = NULL;
3809 int err;
3810 int drop_inode = 0;
3811 unsigned long nr = 0;
3812 u64 objectid;
3813 u64 index = 0;
3814
3815 err = btrfs_check_metadata_free_space(root);
3816 if (err)
3817 goto fail;
3818 trans = btrfs_start_transaction(root, 1);
3819 btrfs_set_trans_block_group(trans, dir);
3820
3821 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3822 if (err) {
3823 err = -ENOSPC;
3824 goto out_unlock;
3825 }
3826
3827 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3828 dentry->d_name.len,
3829 dentry->d_parent->d_inode->i_ino,
3830 objectid, BTRFS_I(dir)->block_group, mode,
3831 &index);
3832 err = PTR_ERR(inode);
3833 if (IS_ERR(inode))
3834 goto out_unlock;
3835
3836 err = btrfs_init_inode_security(inode, dir);
3837 if (err) {
3838 drop_inode = 1;
3839 goto out_unlock;
3840 }
3841
3842 btrfs_set_trans_block_group(trans, inode);
3843 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
3844 if (err)
3845 drop_inode = 1;
3846 else {
3847 inode->i_mapping->a_ops = &btrfs_aops;
3848 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
3849 inode->i_fop = &btrfs_file_operations;
3850 inode->i_op = &btrfs_file_inode_operations;
3851 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
3852 }
3853 btrfs_update_inode_block_group(trans, inode);
3854 btrfs_update_inode_block_group(trans, dir);
3855 out_unlock:
3856 nr = trans->blocks_used;
3857 btrfs_end_transaction_throttle(trans, root);
3858 fail:
3859 if (drop_inode) {
3860 inode_dec_link_count(inode);
3861 iput(inode);
3862 }
3863 btrfs_btree_balance_dirty(root, nr);
3864 return err;
3865 }
3866
3867 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
3868 struct dentry *dentry)
3869 {
3870 struct btrfs_trans_handle *trans;
3871 struct btrfs_root *root = BTRFS_I(dir)->root;
3872 struct inode *inode = old_dentry->d_inode;
3873 u64 index;
3874 unsigned long nr = 0;
3875 int err;
3876 int drop_inode = 0;
3877
3878 if (inode->i_nlink == 0)
3879 return -ENOENT;
3880
3881 btrfs_inc_nlink(inode);
3882 err = btrfs_check_metadata_free_space(root);
3883 if (err)
3884 goto fail;
3885 err = btrfs_set_inode_index(dir, &index);
3886 if (err)
3887 goto fail;
3888
3889 trans = btrfs_start_transaction(root, 1);
3890
3891 btrfs_set_trans_block_group(trans, dir);
3892 atomic_inc(&inode->i_count);
3893
3894 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
3895
3896 if (err)
3897 drop_inode = 1;
3898
3899 btrfs_update_inode_block_group(trans, dir);
3900 err = btrfs_update_inode(trans, root, inode);
3901
3902 if (err)
3903 drop_inode = 1;
3904
3905 nr = trans->blocks_used;
3906
3907 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
3908 btrfs_end_transaction_throttle(trans, root);
3909 fail:
3910 if (drop_inode) {
3911 inode_dec_link_count(inode);
3912 iput(inode);
3913 }
3914 btrfs_btree_balance_dirty(root, nr);
3915 return err;
3916 }
3917
3918 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
3919 {
3920 struct inode *inode = NULL;
3921 struct btrfs_trans_handle *trans;
3922 struct btrfs_root *root = BTRFS_I(dir)->root;
3923 int err = 0;
3924 int drop_on_err = 0;
3925 u64 objectid = 0;
3926 u64 index = 0;
3927 unsigned long nr = 1;
3928
3929 err = btrfs_check_metadata_free_space(root);
3930 if (err)
3931 goto out_unlock;
3932
3933 trans = btrfs_start_transaction(root, 1);
3934 if (IS_ERR(trans)) {
3935 	err = PTR_ERR(trans);
3936 	goto out_unlock;
3937 }
3938 
3939 btrfs_set_trans_block_group(trans, dir);
3940
3941 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
3942 if (err) {
3943 err = -ENOSPC;
3944 goto out_unlock;
3945 }
3946
3947 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
3948 dentry->d_name.len,
3949 dentry->d_parent->d_inode->i_ino, objectid,
3950 BTRFS_I(dir)->block_group, S_IFDIR | mode,
3951 &index);
3952 if (IS_ERR(inode)) {
3953 err = PTR_ERR(inode);
3954 goto out_fail;
3955 }
3956
3957 drop_on_err = 1;
3958
3959 err = btrfs_init_inode_security(inode, dir);
3960 if (err)
3961 goto out_fail;
3962
3963 inode->i_op = &btrfs_dir_inode_operations;
3964 inode->i_fop = &btrfs_dir_file_operations;
3965 btrfs_set_trans_block_group(trans, inode);
3966
3967 btrfs_i_size_write(inode, 0);
3968 err = btrfs_update_inode(trans, root, inode);
3969 if (err)
3970 goto out_fail;
3971
3972 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
3973 inode, dentry->d_name.name,
3974 dentry->d_name.len, 0, index);
3975 if (err)
3976 goto out_fail;
3977
3978 d_instantiate(dentry, inode);
3979 drop_on_err = 0;
3980 btrfs_update_inode_block_group(trans, inode);
3981 btrfs_update_inode_block_group(trans, dir);
3982
3983 out_fail:
3984 nr = trans->blocks_used;
3985 btrfs_end_transaction_throttle(trans, root);
3986
3987 out_unlock:
3988 if (drop_on_err)
3989 iput(inode);
3990 btrfs_btree_balance_dirty(root, nr);
3991 return err;
3992 }
3993
3994 /* helper for btrfs_get_extent. Given an existing extent in the tree,
3995 * and an extent that you want to insert, deal with overlap and insert
3996 * the new extent into the tree.
3997 */
3998 static int merge_extent_mapping(struct extent_map_tree *em_tree,
3999 struct extent_map *existing,
4000 struct extent_map *em,
4001 u64 map_start, u64 map_len)
4002 {
4003 u64 start_diff;
4004
4005 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4006 start_diff = map_start - em->start;
4007 em->start = map_start;
4008 em->len = map_len;
4009 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4010 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4011 em->block_start += start_diff;
4012 em->block_len -= start_diff;
4013 }
4014 return add_extent_mapping(em_tree, em);
4015 }
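
/*
 * Editor's note (not part of the original file): a minimal userspace sketch
 * of the offset arithmetic merge_extent_mapping() performs above -- clip the
 * new mapping to the still-uncovered range and shift the disk start by the
 * same delta (the kernel skips the shift for holes and compressed extents).
 * The struct and numbers below are illustrative stand-ins, not the kernel's
 * extent_map.
 */
#include <stdio.h>
#include <stdint.h>

struct demo_em {
	uint64_t start;        /* logical file offset */
	uint64_t len;
	uint64_t block_start;  /* disk byte offset */
	uint64_t block_len;
};

static void clip_to(struct demo_em *em, uint64_t map_start, uint64_t map_len)
{
	uint64_t start_diff = map_start - em->start;

	em->start = map_start;
	em->len = map_len;
	em->block_start += start_diff;
	em->block_len -= start_diff;
}

int main(void)
{
	/* extent covers file range [0, 64K) at disk offset 1M */
	struct demo_em em = { 0, 65536, 1048576, 65536 };

	/* only [16K, 20K) is still uncovered in the tree */
	clip_to(&em, 16384, 4096);
	printf("file %llu+%llu -> disk %llu\n",
	       (unsigned long long)em.start,
	       (unsigned long long)em.len,
	       (unsigned long long)em.block_start);
	return 0;
}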
4016
4017 static noinline int uncompress_inline(struct btrfs_path *path,
4018 struct inode *inode, struct page *page,
4019 size_t pg_offset, u64 extent_offset,
4020 struct btrfs_file_extent_item *item)
4021 {
4022 int ret;
4023 struct extent_buffer *leaf = path->nodes[0];
4024 char *tmp;
4025 size_t max_size;
4026 unsigned long inline_size;
4027 unsigned long ptr;
4028
4029 WARN_ON(pg_offset != 0);
4030 max_size = btrfs_file_extent_ram_bytes(leaf, item);
4031 inline_size = btrfs_file_extent_inline_item_len(leaf,
4032 btrfs_item_nr(leaf, path->slots[0]));
4033 tmp = kmalloc(inline_size, GFP_NOFS);
4034 ptr = btrfs_file_extent_inline_start(item);
4035
4036 read_extent_buffer(leaf, tmp, ptr, inline_size);
4037
4038 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4039 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4040 inline_size, max_size);
4041 if (ret) {
4042 char *kaddr = kmap_atomic(page, KM_USER0);
4043 unsigned long copy_size = min_t(u64,
4044 PAGE_CACHE_SIZE - pg_offset,
4045 max_size - extent_offset);
4046 memset(kaddr + pg_offset, 0, copy_size);
4047 kunmap_atomic(kaddr, KM_USER0);
4048 }
4049 kfree(tmp);
4050 return 0;
4051 }
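
/*
 * Editor's note (not part of the original file): uncompress_inline() above
 * inflates a zlib-compressed inline extent into a single page and zero-fills
 * whatever the decompressed data does not cover.  A hedged userspace sketch
 * of the same shape using zlib's one-shot uncompress(); DEMO_PAGE_SIZE and
 * the sample data are assumptions for illustration.  Link with -lz.
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define DEMO_PAGE_SIZE 4096u

/* returns 0 on success; the page buffer is always fully initialized */
static int inflate_into_page(unsigned char *page,
			     const unsigned char *src, unsigned long src_len,
			     unsigned long ram_bytes)
{
	uLongf out_len = ram_bytes < DEMO_PAGE_SIZE ? ram_bytes : DEMO_PAGE_SIZE;
	int ret = uncompress(page, &out_len, src, src_len);

	/* on failure (or a short result) zero the tail, as the kernel does */
	if (ret != Z_OK)
		out_len = 0;
	memset(page + out_len, 0, DEMO_PAGE_SIZE - out_len);
	return ret == Z_OK ? 0 : -1;
}

int main(void)
{
	unsigned char raw[] = "hello, inline extent";
	unsigned char packed[128], page[DEMO_PAGE_SIZE];
	uLongf packed_len = sizeof(packed);

	if (compress(packed, &packed_len, raw, sizeof(raw)) != Z_OK)
		return 1;
	if (inflate_into_page(page, packed, packed_len, sizeof(raw)))
		return 1;
	printf("%s\n", page);
	return 0;
}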
4052
4053 /*
4054 * a bit scary, this does extent mapping from logical file offset to the disk.
4055 * the ugly parts come from merging extents from the disk with the in-ram
4056 * representation. This gets more complex because of the data=ordered code,
4057 * where the in-ram extents might be locked pending data=ordered completion.
4058 *
4059 * This also copies inline extents directly into the page.
4060 */
4061
4062 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4063 size_t pg_offset, u64 start, u64 len,
4064 int create)
4065 {
4066 int ret;
4067 int err = 0;
4068 u64 bytenr;
4069 u64 extent_start = 0;
4070 u64 extent_end = 0;
4071 u64 objectid = inode->i_ino;
4072 u32 found_type;
4073 struct btrfs_path *path = NULL;
4074 struct btrfs_root *root = BTRFS_I(inode)->root;
4075 struct btrfs_file_extent_item *item;
4076 struct extent_buffer *leaf;
4077 struct btrfs_key found_key;
4078 struct extent_map *em = NULL;
4079 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4080 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4081 struct btrfs_trans_handle *trans = NULL;
4082 int compressed;
4083
4084 again:
4085 read_lock(&em_tree->lock);
4086 em = lookup_extent_mapping(em_tree, start, len);
4087 if (em)
4088 em->bdev = root->fs_info->fs_devices->latest_bdev;
4089 read_unlock(&em_tree->lock);
4090
4091 if (em) {
4092 if (em->start > start || em->start + em->len <= start)
4093 free_extent_map(em);
4094 else if (em->block_start == EXTENT_MAP_INLINE && page)
4095 free_extent_map(em);
4096 else
4097 goto out;
4098 }
4099 em = alloc_extent_map(GFP_NOFS);
4100 if (!em) {
4101 err = -ENOMEM;
4102 goto out;
4103 }
4104 em->bdev = root->fs_info->fs_devices->latest_bdev;
4105 em->start = EXTENT_MAP_HOLE;
4106 em->orig_start = EXTENT_MAP_HOLE;
4107 em->len = (u64)-1;
4108 em->block_len = (u64)-1;
4109
4110 if (!path) {
4111 path = btrfs_alloc_path();
4112 BUG_ON(!path);
4113 }
4114
4115 ret = btrfs_lookup_file_extent(trans, root, path,
4116 objectid, start, trans != NULL);
4117 if (ret < 0) {
4118 err = ret;
4119 goto out;
4120 }
4121
4122 if (ret != 0) {
4123 if (path->slots[0] == 0)
4124 goto not_found;
4125 path->slots[0]--;
4126 }
4127
4128 leaf = path->nodes[0];
4129 item = btrfs_item_ptr(leaf, path->slots[0],
4130 struct btrfs_file_extent_item);
4131 /* are we inside the extent that was found? */
4132 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4133 found_type = btrfs_key_type(&found_key);
4134 if (found_key.objectid != objectid ||
4135 found_type != BTRFS_EXTENT_DATA_KEY) {
4136 goto not_found;
4137 }
4138
4139 found_type = btrfs_file_extent_type(leaf, item);
4140 extent_start = found_key.offset;
4141 compressed = btrfs_file_extent_compression(leaf, item);
4142 if (found_type == BTRFS_FILE_EXTENT_REG ||
4143 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4144 extent_end = extent_start +
4145 btrfs_file_extent_num_bytes(leaf, item);
4146 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4147 size_t size;
4148 size = btrfs_file_extent_inline_len(leaf, item);
4149 extent_end = (extent_start + size + root->sectorsize - 1) &
4150 ~((u64)root->sectorsize - 1);
4151 }
4152
4153 if (start >= extent_end) {
4154 path->slots[0]++;
4155 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4156 ret = btrfs_next_leaf(root, path);
4157 if (ret < 0) {
4158 err = ret;
4159 goto out;
4160 }
4161 if (ret > 0)
4162 goto not_found;
4163 leaf = path->nodes[0];
4164 }
4165 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4166 if (found_key.objectid != objectid ||
4167 found_key.type != BTRFS_EXTENT_DATA_KEY)
4168 goto not_found;
4169 if (start + len <= found_key.offset)
4170 goto not_found;
4171 em->start = start;
4172 em->len = found_key.offset - start;
4173 goto not_found_em;
4174 }
4175
4176 if (found_type == BTRFS_FILE_EXTENT_REG ||
4177 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4178 em->start = extent_start;
4179 em->len = extent_end - extent_start;
4180 em->orig_start = extent_start -
4181 btrfs_file_extent_offset(leaf, item);
4182 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4183 if (bytenr == 0) {
4184 em->block_start = EXTENT_MAP_HOLE;
4185 goto insert;
4186 }
4187 if (compressed) {
4188 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4189 em->block_start = bytenr;
4190 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4191 item);
4192 } else {
4193 bytenr += btrfs_file_extent_offset(leaf, item);
4194 em->block_start = bytenr;
4195 em->block_len = em->len;
4196 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4197 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4198 }
4199 goto insert;
4200 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4201 unsigned long ptr;
4202 char *map;
4203 size_t size;
4204 size_t extent_offset;
4205 size_t copy_size;
4206
4207 em->block_start = EXTENT_MAP_INLINE;
4208 if (!page || create) {
4209 em->start = extent_start;
4210 em->len = extent_end - extent_start;
4211 goto out;
4212 }
4213
4214 size = btrfs_file_extent_inline_len(leaf, item);
4215 extent_offset = page_offset(page) + pg_offset - extent_start;
4216 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4217 size - extent_offset);
4218 em->start = extent_start + extent_offset;
4219 em->len = (copy_size + root->sectorsize - 1) &
4220 ~((u64)root->sectorsize - 1);
4221 em->orig_start = EXTENT_MAP_INLINE;
4222 if (compressed)
4223 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4224 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4225 if (create == 0 && !PageUptodate(page)) {
4226 if (btrfs_file_extent_compression(leaf, item) ==
4227 BTRFS_COMPRESS_ZLIB) {
4228 ret = uncompress_inline(path, inode, page,
4229 pg_offset,
4230 extent_offset, item);
4231 BUG_ON(ret);
4232 } else {
4233 map = kmap(page);
4234 read_extent_buffer(leaf, map + pg_offset, ptr,
4235 copy_size);
4236 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
4237 memset(map + pg_offset + copy_size, 0,
4238 PAGE_CACHE_SIZE - pg_offset -
4239 copy_size);
4240 }
4241 kunmap(page);
4242 }
4243 flush_dcache_page(page);
4244 } else if (create && PageUptodate(page)) {
4245 if (!trans) {
4246 kunmap(page);
4247 free_extent_map(em);
4248 em = NULL;
4249 btrfs_release_path(root, path);
4250 trans = btrfs_join_transaction(root, 1);
4251 goto again;
4252 }
4253 map = kmap(page);
4254 write_extent_buffer(leaf, map + pg_offset, ptr,
4255 copy_size);
4256 kunmap(page);
4257 btrfs_mark_buffer_dirty(leaf);
4258 }
4259 set_extent_uptodate(io_tree, em->start,
4260 extent_map_end(em) - 1, GFP_NOFS);
4261 goto insert;
4262 } else {
4263 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4264 WARN_ON(1);
4265 }
4266 not_found:
4267 em->start = start;
4268 em->len = len;
4269 not_found_em:
4270 em->block_start = EXTENT_MAP_HOLE;
4271 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4272 insert:
4273 btrfs_release_path(root, path);
4274 if (em->start > start || extent_map_end(em) <= start) {
4275 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4276 "[%llu %llu]\n", (unsigned long long)em->start,
4277 (unsigned long long)em->len,
4278 (unsigned long long)start,
4279 (unsigned long long)len);
4280 err = -EIO;
4281 goto out;
4282 }
4283
4284 err = 0;
4285 write_lock(&em_tree->lock);
4286 ret = add_extent_mapping(em_tree, em);
4287 /* it is possible that someone inserted the extent into the tree
4288 * while we had the lock dropped. It is also possible that
4289 * an overlapping map exists in the tree
4290 */
4291 if (ret == -EEXIST) {
4292 struct extent_map *existing;
4293
4294 ret = 0;
4295
4296 existing = lookup_extent_mapping(em_tree, start, len);
4297 if (existing && (existing->start > start ||
4298 existing->start + existing->len <= start)) {
4299 free_extent_map(existing);
4300 existing = NULL;
4301 }
4302 if (!existing) {
4303 existing = lookup_extent_mapping(em_tree, em->start,
4304 em->len);
4305 if (existing) {
4306 err = merge_extent_mapping(em_tree, existing,
4307 em, start,
4308 root->sectorsize);
4309 free_extent_map(existing);
4310 if (err) {
4311 free_extent_map(em);
4312 em = NULL;
4313 }
4314 } else {
4315 err = -EIO;
4316 free_extent_map(em);
4317 em = NULL;
4318 }
4319 } else {
4320 free_extent_map(em);
4321 em = existing;
4322 err = 0;
4323 }
4324 }
4325 write_unlock(&em_tree->lock);
4326 out:
4327 if (path)
4328 btrfs_free_path(path);
4329 if (trans) {
4330 ret = btrfs_end_transaction(trans, root);
4331 if (!err)
4332 err = ret;
4333 }
4334 if (err) {
4335 free_extent_map(em);
4336 return ERR_PTR(err);
4337 }
4338 return em;
4339 }
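
/*
 * Editor's note (not part of the original file): the inline-extent paths in
 * btrfs_get_extent() above round extent ends up to a sector boundary with the
 * classic "(x + align - 1) & ~(align - 1)" mask.  A minimal userspace sketch
 * of that arithmetic; the 4096-byte sector size is an assumed example value.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t round_up_pow2(uint64_t x, uint64_t align)
{
	/* align must be a power of two, as root->sectorsize always is */
	return (x + align - 1) & ~(align - 1);
}

int main(void)
{
	uint64_t sectorsize = 4096;

	/* a 5000-byte inline extent ends on the next 4K boundary: 8192 */
	printf("%llu\n", (unsigned long long)round_up_pow2(5000, sectorsize));
	return 0;
}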
4340
4341 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4342 const struct iovec *iov, loff_t offset,
4343 unsigned long nr_segs)
4344 {
4345 return -EINVAL;
4346 }
4347
4348 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4349 __u64 start, __u64 len)
4350 {
4351 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4352 }
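
/*
 * Editor's note (not part of the original file): btrfs_fiemap() above is what
 * ends up servicing the FS_IOC_FIEMAP ioctl via extent_fiemap().  A minimal,
 * hedged userspace sketch of driving that ioctl; extent count and error
 * handling are kept deliberately small.
 */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	unsigned int i, n = 32;            /* room for up to 32 extents */
	struct fiemap *fm;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;  /* whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;    /* flush delalloc first */
	fm->fm_extent_count = n;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++)
		printf("logical %llu physical %llu len %llu\n",
		       (unsigned long long)fm->fm_extents[i].fe_logical,
		       (unsigned long long)fm->fm_extents[i].fe_physical,
		       (unsigned long long)fm->fm_extents[i].fe_length);
	free(fm);
	close(fd);
	return 0;
}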
4353
4354 int btrfs_readpage(struct file *file, struct page *page)
4355 {
4356 struct extent_io_tree *tree;
4357 tree = &BTRFS_I(page->mapping->host)->io_tree;
4358 return extent_read_full_page(tree, page, btrfs_get_extent);
4359 }
4360
4361 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4362 {
4363 struct extent_io_tree *tree;
4364
4365
4366 if (current->flags & PF_MEMALLOC) {
4367 redirty_page_for_writepage(wbc, page);
4368 unlock_page(page);
4369 return 0;
4370 }
4371 tree = &BTRFS_I(page->mapping->host)->io_tree;
4372 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4373 }
4374
4375 int btrfs_writepages(struct address_space *mapping,
4376 struct writeback_control *wbc)
4377 {
4378 struct extent_io_tree *tree;
4379
4380 tree = &BTRFS_I(mapping->host)->io_tree;
4381 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4382 }
4383
4384 static int
4385 btrfs_readpages(struct file *file, struct address_space *mapping,
4386 struct list_head *pages, unsigned nr_pages)
4387 {
4388 struct extent_io_tree *tree;
4389 tree = &BTRFS_I(mapping->host)->io_tree;
4390 return extent_readpages(tree, mapping, pages, nr_pages,
4391 btrfs_get_extent);
4392 }
4393 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4394 {
4395 struct extent_io_tree *tree;
4396 struct extent_map_tree *map;
4397 int ret;
4398
4399 tree = &BTRFS_I(page->mapping->host)->io_tree;
4400 map = &BTRFS_I(page->mapping->host)->extent_tree;
4401 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4402 if (ret == 1) {
4403 ClearPagePrivate(page);
4404 set_page_private(page, 0);
4405 page_cache_release(page);
4406 }
4407 return ret;
4408 }
4409
4410 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4411 {
4412 if (PageWriteback(page) || PageDirty(page))
4413 return 0;
4414 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4415 }
4416
4417 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4418 {
4419 struct extent_io_tree *tree;
4420 struct btrfs_ordered_extent *ordered;
4421 u64 page_start = page_offset(page);
4422 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4423
4424
4425 /*
4426 * we have the page locked, so new writeback can't start,
4427 * and the dirty bit won't be cleared while we are here.
4428 *
4429 * Wait for IO on this page so that we can safely clear
4430 * the PagePrivate2 bit and do ordered accounting
4431 */
4432 wait_on_page_writeback(page);
4433
4434 tree = &BTRFS_I(page->mapping->host)->io_tree;
4435 if (offset) {
4436 btrfs_releasepage(page, GFP_NOFS);
4437 return;
4438 }
4439 lock_extent(tree, page_start, page_end, GFP_NOFS);
4440 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4441 page_offset(page));
4442 if (ordered) {
4443 /*
4444 * IO on this page will never be started, so we need
4445 * to account for any ordered extents now
4446 */
4447 clear_extent_bit(tree, page_start, page_end,
4448 EXTENT_DIRTY | EXTENT_DELALLOC |
4449 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
4450 /*
4451 * whoever cleared the private bit is responsible
4452 * for the finish_ordered_io
4453 */
4454 if (TestClearPagePrivate2(page)) {
4455 btrfs_finish_ordered_io(page->mapping->host,
4456 page_start, page_end);
4457 }
4458 btrfs_put_ordered_extent(ordered);
4459 lock_extent(tree, page_start, page_end, GFP_NOFS);
4460 }
4461 clear_extent_bit(tree, page_start, page_end,
4462 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
4463 1, 1, NULL, GFP_NOFS);
4464 __btrfs_releasepage(page, GFP_NOFS);
4465
4466 ClearPageChecked(page);
4467 if (PagePrivate(page)) {
4468 ClearPagePrivate(page);
4469 set_page_private(page, 0);
4470 page_cache_release(page);
4471 }
4472 }
4473
4474 /*
4475 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4476 * called from a page fault handler when a page is first dirtied. Hence we must
4477 * be careful to check for EOF conditions here. We set the page up correctly
4478 * for a written page which means we get ENOSPC checking when writing into
4479 * holes and correct delalloc and unwritten extent mapping on filesystems that
4480 * support these features.
4481 *
4482 * We are not allowed to take the i_mutex here so we have to play games to
4483 * protect against truncate races as the page could now be beyond EOF. Because
4484 * vmtruncate() writes the inode size before removing pages, once we have the
4485 * page lock we can determine safely if the page is beyond EOF. If it is not
4486 * beyond EOF, then the page is guaranteed safe against truncation until we
4487 * unlock the page.
4488 */
4489 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4490 {
4491 struct page *page = vmf->page;
4492 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4493 struct btrfs_root *root = BTRFS_I(inode)->root;
4494 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4495 struct btrfs_ordered_extent *ordered;
4496 char *kaddr;
4497 unsigned long zero_start;
4498 loff_t size;
4499 int ret;
4500 u64 page_start;
4501 u64 page_end;
4502
4503 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4504 if (ret) {
4505 if (ret == -ENOMEM)
4506 ret = VM_FAULT_OOM;
4507 else /* -ENOSPC, -EIO, etc */
4508 ret = VM_FAULT_SIGBUS;
4509 goto out;
4510 }
4511
4512 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4513 again:
4514 lock_page(page);
4515 size = i_size_read(inode);
4516 page_start = page_offset(page);
4517 page_end = page_start + PAGE_CACHE_SIZE - 1;
4518
4519 if ((page->mapping != inode->i_mapping) ||
4520 (page_start >= size)) {
4521 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4522 /* page got truncated out from underneath us */
4523 goto out_unlock;
4524 }
4525 wait_on_page_writeback(page);
4526
4527 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4528 set_page_extent_mapped(page);
4529
4530 /*
4531 * we can't set the delalloc bits if there are pending ordered
4532 * extents. Drop our locks and wait for them to finish
4533 */
4534 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4535 if (ordered) {
4536 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4537 unlock_page(page);
4538 btrfs_start_ordered_extent(inode, ordered, 1);
4539 btrfs_put_ordered_extent(ordered);
4540 goto again;
4541 }
4542
4543 btrfs_set_extent_delalloc(inode, page_start, page_end);
4544 ret = 0;
4545
4546 /* page is wholly or partially inside EOF */
4547 if (page_start + PAGE_CACHE_SIZE > size)
4548 zero_start = size & ~PAGE_CACHE_MASK;
4549 else
4550 zero_start = PAGE_CACHE_SIZE;
4551
4552 if (zero_start != PAGE_CACHE_SIZE) {
4553 kaddr = kmap(page);
4554 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4555 flush_dcache_page(page);
4556 kunmap(page);
4557 }
4558 ClearPageChecked(page);
4559 set_page_dirty(page);
4560 SetPageUptodate(page);
4561
4562 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
4563 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4564
4565 out_unlock:
4566 if (!ret)
4567 return VM_FAULT_LOCKED;
4568 unlock_page(page);
4569 out:
4570 return ret;
4571 }
4572
4573 static void btrfs_truncate(struct inode *inode)
4574 {
4575 struct btrfs_root *root = BTRFS_I(inode)->root;
4576 int ret;
4577 struct btrfs_trans_handle *trans;
4578 unsigned long nr;
4579 u64 mask = root->sectorsize - 1;
4580
4581 if (!S_ISREG(inode->i_mode))
4582 return;
4583 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4584 return;
4585
4586 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4587 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4588
4589 trans = btrfs_start_transaction(root, 1);
4590
4591 /*
4592 * setattr is responsible for setting the ordered_data_close flag,
4593 * but that is only tested during the last file release. That
4594 * could happen well after the next commit, leaving a great big
4595 * window where new writes may get lost if someone chooses to write
4596 * to this file after truncating to zero
4597 *
4598 * The inode doesn't have any dirty data here, and so if we commit
4599 * this is a noop. If someone immediately starts writing to the inode
4600 * it is very likely we'll catch some of their writes in this
4601 * transaction, and the commit will find this file on the ordered
4602 * data list with good things to send down.
4603 *
4604 * This is a best effort solution, there is still a window where
4605 * using truncate to replace the contents of the file will
4606 * end up with a zero length file after a crash.
4607 */
4608 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
4609 btrfs_add_ordered_operation(trans, root, inode);
4610
4611 btrfs_set_trans_block_group(trans, inode);
4612 btrfs_i_size_write(inode, inode->i_size);
4613
4614 ret = btrfs_orphan_add(trans, inode);
4615 if (ret)
4616 goto out;
4617 /* FIXME, add redo link to tree so we don't leak on crash */
4618 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
4619 BTRFS_EXTENT_DATA_KEY);
4620 btrfs_update_inode(trans, root, inode);
4621
4622 ret = btrfs_orphan_del(trans, inode);
4623 BUG_ON(ret);
4624
4625 out:
4626 nr = trans->blocks_used;
4627 ret = btrfs_end_transaction_throttle(trans, root);
4628 BUG_ON(ret);
4629 btrfs_btree_balance_dirty(root, nr);
4630 }
4631
4632 /*
4633 * create a new subvolume directory/inode (helper for the ioctl).
4634 */
4635 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
4636 struct btrfs_root *new_root, struct dentry *dentry,
4637 u64 new_dirid, u64 alloc_hint)
4638 {
4639 struct inode *inode;
4640 int error;
4641 u64 index = 0;
4642
4643 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
4644 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
4645 if (IS_ERR(inode))
4646 return PTR_ERR(inode);
4647 inode->i_op = &btrfs_dir_inode_operations;
4648 inode->i_fop = &btrfs_dir_file_operations;
4649
4650 inode->i_nlink = 1;
4651 btrfs_i_size_write(inode, 0);
4652
4653 error = btrfs_update_inode(trans, new_root, inode);
4654 if (error)
4655 return error;
4656
4657 d_instantiate(dentry, inode);
4658 return 0;
4659 }
4660
4661 /* helper function for file defrag and space balancing. This
4662 * forces readahead on a given range of bytes in an inode
4663 */
4664 unsigned long btrfs_force_ra(struct address_space *mapping,
4665 struct file_ra_state *ra, struct file *file,
4666 pgoff_t offset, pgoff_t last_index)
4667 {
4668 pgoff_t req_size = last_index - offset + 1;
4669
4670 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
4671 return offset + req_size;
4672 }
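
/*
 * Editor's note (not part of the original file): btrfs_force_ra() above just
 * kicks synchronous readahead on a page range.  The rough userspace analogue
 * -- offered only as an illustrative assumption, not the same mechanism -- is
 * hinting a byte range to the page cache with posix_fadvise():
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd, err;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* hint that the first 1 MiB will be needed soon */
	err = posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_WILLNEED);
	if (err)
		fprintf(stderr, "posix_fadvise: %d\n", err);
	close(fd);
	return 0;
}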
4673
4674 struct inode *btrfs_alloc_inode(struct super_block *sb)
4675 {
4676 struct btrfs_inode *ei;
4677
4678 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
4679 if (!ei)
4680 return NULL;
4681 ei->last_trans = 0;
4682 ei->logged_trans = 0;
4683 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
4684 INIT_LIST_HEAD(&ei->i_orphan);
4685 INIT_LIST_HEAD(&ei->ordered_operations);
4686 return &ei->vfs_inode;
4687 }
4688
4689 void btrfs_destroy_inode(struct inode *inode)
4690 {
4691 struct btrfs_ordered_extent *ordered;
4692 struct btrfs_root *root = BTRFS_I(inode)->root;
4693
4694 WARN_ON(!list_empty(&inode->i_dentry));
4695 WARN_ON(inode->i_data.nrpages);
4696
4697 /*
4698 * Make sure we're properly removed from the ordered operation
4699 * lists.
4700 */
4701 smp_mb();
4702 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
4703 spin_lock(&root->fs_info->ordered_extent_lock);
4704 list_del_init(&BTRFS_I(inode)->ordered_operations);
4705 spin_unlock(&root->fs_info->ordered_extent_lock);
4706 }
4707
4708 spin_lock(&root->list_lock);
4709 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
4710 printk(KERN_ERR "BTRFS: inode %lu: inode still on the orphan"
4711 " list\n", inode->i_ino);
4712 dump_stack();
4713 }
4714 spin_unlock(&root->list_lock);
4715
4716 while (1) {
4717 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
4718 if (!ordered)
4719 break;
4720 else {
4721 printk(KERN_ERR "btrfs found ordered "
4722 "extent %llu %llu on inode cleanup\n",
4723 (unsigned long long)ordered->file_offset,
4724 (unsigned long long)ordered->len);
4725 btrfs_remove_ordered_extent(inode, ordered);
4726 btrfs_put_ordered_extent(ordered);
4727 btrfs_put_ordered_extent(ordered);
4728 }
4729 }
4730 inode_tree_del(inode);
4731 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
4732 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
4733 }
4734
4735 static void init_once(void *foo)
4736 {
4737 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
4738
4739 inode_init_once(&ei->vfs_inode);
4740 }
4741
4742 void btrfs_destroy_cachep(void)
4743 {
4744 if (btrfs_inode_cachep)
4745 kmem_cache_destroy(btrfs_inode_cachep);
4746 if (btrfs_trans_handle_cachep)
4747 kmem_cache_destroy(btrfs_trans_handle_cachep);
4748 if (btrfs_transaction_cachep)
4749 kmem_cache_destroy(btrfs_transaction_cachep);
4750 if (btrfs_path_cachep)
4751 kmem_cache_destroy(btrfs_path_cachep);
4752 }
4753
4754 int btrfs_init_cachep(void)
4755 {
4756 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
4757 sizeof(struct btrfs_inode), 0,
4758 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
4759 if (!btrfs_inode_cachep)
4760 goto fail;
4761
4762 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
4763 sizeof(struct btrfs_trans_handle), 0,
4764 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
4765 if (!btrfs_trans_handle_cachep)
4766 goto fail;
4767
4768 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
4769 sizeof(struct btrfs_transaction), 0,
4770 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
4771 if (!btrfs_transaction_cachep)
4772 goto fail;
4773
4774 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
4775 sizeof(struct btrfs_path), 0,
4776 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
4777 if (!btrfs_path_cachep)
4778 goto fail;
4779
4780 return 0;
4781 fail:
4782 btrfs_destroy_cachep();
4783 return -ENOMEM;
4784 }
4785
4786 static int btrfs_getattr(struct vfsmount *mnt,
4787 struct dentry *dentry, struct kstat *stat)
4788 {
4789 struct inode *inode = dentry->d_inode;
4790 generic_fillattr(inode, stat);
4791 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
4792 stat->blksize = PAGE_CACHE_SIZE;
4793 stat->blocks = (inode_get_bytes(inode) +
4794 BTRFS_I(inode)->delalloc_bytes) >> 9;
4795 return 0;
4796 }
4797
4798 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
4799 struct inode *new_dir, struct dentry *new_dentry)
4800 {
4801 struct btrfs_trans_handle *trans;
4802 struct btrfs_root *root = BTRFS_I(old_dir)->root;
4803 struct inode *new_inode = new_dentry->d_inode;
4804 struct inode *old_inode = old_dentry->d_inode;
4805 struct timespec ctime = CURRENT_TIME;
4806 u64 index = 0;
4807 int ret;
4808
4809 /* we're not allowed to rename between subvolumes */
4810 if (BTRFS_I(old_inode)->root->root_key.objectid !=
4811 BTRFS_I(new_dir)->root->root_key.objectid)
4812 return -EXDEV;
4813
4814 if (S_ISDIR(old_inode->i_mode) && new_inode &&
4815 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE) {
4816 return -ENOTEMPTY;
4817 }
4818
4819 /* to rename a snapshot or subvolume, we need to juggle the
4820 * backrefs. This isn't coded yet
4821 */
4822 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
4823 return -EXDEV;
4824
4825 ret = btrfs_check_metadata_free_space(root);
4826 if (ret)
4827 goto out_unlock;
4828
4829 /*
4830 * we're using rename to replace one file with another.
4831 * and the replacement file is large. Start IO on it now so
4832 * we don't add too much work to the end of the transaction
4833 */
4834 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
4835 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
4836 filemap_flush(old_inode->i_mapping);
4837
4838 trans = btrfs_start_transaction(root, 1);
4839
4840 /*
4841 * make sure the inode gets flushed if it is replacing
4842 * something.
4843 */
4844 if (new_inode && new_inode->i_size &&
4845 old_inode && S_ISREG(old_inode->i_mode)) {
4846 btrfs_add_ordered_operation(trans, root, old_inode);
4847 }
4848
4849 /*
4850 * this is an ugly little race, but the rename is required to make
4851 * sure that if we crash, the inode is either at the old name
4852 * or the new one. pinning the log transaction lets us make sure
4853 * we don't allow a log commit to come in after we unlink the
4854 * name but before we add the new name back in.
4855 */
4856 btrfs_pin_log_trans(root);
4857
4858 btrfs_set_trans_block_group(trans, new_dir);
4859
4860 btrfs_inc_nlink(old_dentry->d_inode);
4861 old_dir->i_ctime = old_dir->i_mtime = ctime;
4862 new_dir->i_ctime = new_dir->i_mtime = ctime;
4863 old_inode->i_ctime = ctime;
4864
4865 if (old_dentry->d_parent != new_dentry->d_parent)
4866 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
4867
4868 ret = btrfs_unlink_inode(trans, root, old_dir, old_dentry->d_inode,
4869 old_dentry->d_name.name,
4870 old_dentry->d_name.len);
4871 if (ret)
4872 goto out_fail;
4873
4874 if (new_inode) {
4875 new_inode->i_ctime = CURRENT_TIME;
4876 ret = btrfs_unlink_inode(trans, root, new_dir,
4877 new_dentry->d_inode,
4878 new_dentry->d_name.name,
4879 new_dentry->d_name.len);
4880 if (ret)
4881 goto out_fail;
4882 if (new_inode->i_nlink == 0) {
4883 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
4884 if (ret)
4885 goto out_fail;
4886 }
4887
4888 }
4889 ret = btrfs_set_inode_index(new_dir, &index);
4890 if (ret)
4891 goto out_fail;
4892
4893 ret = btrfs_add_link(trans, new_dentry->d_parent->d_inode,
4894 old_inode, new_dentry->d_name.name,
4895 new_dentry->d_name.len, 1, index);
4896 if (ret)
4897 goto out_fail;
4898
4899 btrfs_log_new_name(trans, old_inode, old_dir,
4900 new_dentry->d_parent);
4901 out_fail:
4902
4903 /* this btrfs_end_log_trans just allows the current
4904 * log-sub transaction to complete
4905 */
4906 btrfs_end_log_trans(root);
4907 btrfs_end_transaction_throttle(trans, root);
4908 out_unlock:
4909 return ret;
4910 }
4911
4912 /*
4913 * some fairly slow code that needs optimization. This walks the list
4914 * of all the inodes with pending delalloc and forces them to disk.
4915 */
4916 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
4917 {
4918 struct list_head *head = &root->fs_info->delalloc_inodes;
4919 struct btrfs_inode *binode;
4920 struct inode *inode;
4921
4922 if (root->fs_info->sb->s_flags & MS_RDONLY)
4923 return -EROFS;
4924
4925 spin_lock(&root->fs_info->delalloc_lock);
4926 while (!list_empty(head)) {
4927 binode = list_entry(head->next, struct btrfs_inode,
4928 delalloc_inodes);
4929 inode = igrab(&binode->vfs_inode);
4930 if (!inode)
4931 list_del_init(&binode->delalloc_inodes);
4932 spin_unlock(&root->fs_info->delalloc_lock);
4933 if (inode) {
4934 filemap_flush(inode->i_mapping);
4935 iput(inode);
4936 }
4937 cond_resched();
4938 spin_lock(&root->fs_info->delalloc_lock);
4939 }
4940 spin_unlock(&root->fs_info->delalloc_lock);
4941
4942 /* the filemap_flush will queue IO into the worker threads, but
4943 * we have to make sure the IO is actually started and that
4944 * ordered extents get created before we return
4945 */
4946 atomic_inc(&root->fs_info->async_submit_draining);
4947 while (atomic_read(&root->fs_info->nr_async_submits) ||
4948 atomic_read(&root->fs_info->async_delalloc_pages)) {
4949 wait_event(root->fs_info->async_submit_wait,
4950 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
4951 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
4952 }
4953 atomic_dec(&root->fs_info->async_submit_draining);
4954 return 0;
4955 }
4956
4957 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
4958 const char *symname)
4959 {
4960 struct btrfs_trans_handle *trans;
4961 struct btrfs_root *root = BTRFS_I(dir)->root;
4962 struct btrfs_path *path;
4963 struct btrfs_key key;
4964 struct inode *inode = NULL;
4965 int err;
4966 int drop_inode = 0;
4967 u64 objectid;
4968 u64 index = 0;
4969 int name_len;
4970 int datasize;
4971 unsigned long ptr;
4972 struct btrfs_file_extent_item *ei;
4973 struct extent_buffer *leaf;
4974 unsigned long nr = 0;
4975
4976 name_len = strlen(symname) + 1;
4977 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
4978 return -ENAMETOOLONG;
4979
4980 err = btrfs_check_metadata_free_space(root);
4981 if (err)
4982 goto out_fail;
4983
4984 trans = btrfs_start_transaction(root, 1);
4985 btrfs_set_trans_block_group(trans, dir);
4986
4987 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4988 if (err) {
4989 err = -ENOSPC;
4990 goto out_unlock;
4991 }
4992
4993 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4994 dentry->d_name.len,
4995 dentry->d_parent->d_inode->i_ino, objectid,
4996 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
4997 &index);
4998 err = PTR_ERR(inode);
4999 if (IS_ERR(inode))
5000 goto out_unlock;
5001
5002 err = btrfs_init_inode_security(inode, dir);
5003 if (err) {
5004 drop_inode = 1;
5005 goto out_unlock;
5006 }
5007
5008 btrfs_set_trans_block_group(trans, inode);
5009 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
5010 if (err)
5011 drop_inode = 1;
5012 else {
5013 inode->i_mapping->a_ops = &btrfs_aops;
5014 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5015 inode->i_fop = &btrfs_file_operations;
5016 inode->i_op = &btrfs_file_inode_operations;
5017 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5018 }
5019 btrfs_update_inode_block_group(trans, inode);
5020 btrfs_update_inode_block_group(trans, dir);
5021 if (drop_inode)
5022 goto out_unlock;
5023
5024 path = btrfs_alloc_path();
5025 BUG_ON(!path);
5026 key.objectid = inode->i_ino;
5027 key.offset = 0;
5028 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
5029 datasize = btrfs_file_extent_calc_inline_size(name_len);
5030 err = btrfs_insert_empty_item(trans, root, path, &key,
5031 datasize);
5032 if (err) {
5033 drop_inode = 1;
5034 goto out_unlock;
5035 }
5036 leaf = path->nodes[0];
5037 ei = btrfs_item_ptr(leaf, path->slots[0],
5038 struct btrfs_file_extent_item);
5039 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
5040 btrfs_set_file_extent_type(leaf, ei,
5041 BTRFS_FILE_EXTENT_INLINE);
5042 btrfs_set_file_extent_encryption(leaf, ei, 0);
5043 btrfs_set_file_extent_compression(leaf, ei, 0);
5044 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5045 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5046
5047 ptr = btrfs_file_extent_inline_start(ei);
5048 write_extent_buffer(leaf, symname, ptr, name_len);
5049 btrfs_mark_buffer_dirty(leaf);
5050 btrfs_free_path(path);
5051
5052 inode->i_op = &btrfs_symlink_inode_operations;
5053 inode->i_mapping->a_ops = &btrfs_symlink_aops;
5054 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5055 inode_set_bytes(inode, name_len);
5056 btrfs_i_size_write(inode, name_len - 1);
5057 err = btrfs_update_inode(trans, root, inode);
5058 if (err)
5059 drop_inode = 1;
5060
5061 out_unlock:
5062 nr = trans->blocks_used;
5063 btrfs_end_transaction_throttle(trans, root);
5064 out_fail:
5065 if (drop_inode) {
5066 inode_dec_link_count(inode);
5067 iput(inode);
5068 }
5069 btrfs_btree_balance_dirty(root, nr);
5070 return err;
5071 }
5072
5073 static int prealloc_file_range(struct btrfs_trans_handle *trans,
5074 struct inode *inode, u64 start, u64 end,
5075 u64 locked_end, u64 alloc_hint, int mode)
5076 {
5077 struct btrfs_root *root = BTRFS_I(inode)->root;
5078 struct btrfs_key ins;
5079 u64 alloc_size;
5080 u64 cur_offset = start;
5081 u64 num_bytes = end - start;
5082 int ret = 0;
5083
5084 while (num_bytes > 0) {
5085 alloc_size = min(num_bytes, root->fs_info->max_extent);
5086 ret = btrfs_reserve_extent(trans, root, alloc_size,
5087 root->sectorsize, 0, alloc_hint,
5088 (u64)-1, &ins, 1);
5089 if (ret) {
5090 WARN_ON(1);
5091 goto out;
5092 }
5093 ret = insert_reserved_file_extent(trans, inode,
5094 cur_offset, ins.objectid,
5095 ins.offset, ins.offset,
5096 ins.offset, locked_end,
5097 0, 0, 0,
5098 BTRFS_FILE_EXTENT_PREALLOC);
5099 BUG_ON(ret);
5100 btrfs_drop_extent_cache(inode, cur_offset,
5101 cur_offset + ins.offset - 1, 0);
5102 num_bytes -= ins.offset;
5103 cur_offset += ins.offset;
5104 alloc_hint = ins.objectid + ins.offset;
5105 }
5106 out:
5107 if (cur_offset > start) {
5108 inode->i_ctime = CURRENT_TIME;
5109 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5110 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5111 cur_offset > i_size_read(inode))
5112 btrfs_i_size_write(inode, cur_offset);
5113 ret = btrfs_update_inode(trans, root, inode);
5114 BUG_ON(ret);
5115 }
5116
5117 return ret;
5118 }
5119
5120 static long btrfs_fallocate(struct inode *inode, int mode,
5121 loff_t offset, loff_t len)
5122 {
5123 u64 cur_offset;
5124 u64 last_byte;
5125 u64 alloc_start;
5126 u64 alloc_end;
5127 u64 alloc_hint = 0;
5128 u64 locked_end;
5129 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5130 struct extent_map *em;
5131 struct btrfs_trans_handle *trans;
5132 struct btrfs_root *root;
5133 int ret;
5134
5135 alloc_start = offset & ~mask;
5136 alloc_end = (offset + len + mask) & ~mask;
5137
5138 /*
5139 * wait for ordered IO before we have any locks. We'll loop again
5140 * below with the locks held.
5141 */
5142 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5143
5144 mutex_lock(&inode->i_mutex);
5145 if (alloc_start > inode->i_size) {
5146 ret = btrfs_cont_expand(inode, alloc_start);
5147 if (ret)
5148 goto out;
5149 }
5150
5151 root = BTRFS_I(inode)->root;
5152
5153 ret = btrfs_check_data_free_space(root, inode,
5154 alloc_end - alloc_start);
5155 if (ret)
5156 goto out;
5157
5158 locked_end = alloc_end - 1;
5159 while (1) {
5160 struct btrfs_ordered_extent *ordered;
5161
5162 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
5163 if (!trans) {
5164 ret = -EIO;
5165 goto out_free;
5166 }
5167
5168 /* the extent lock is ordered inside the running
5169 * transaction
5170 */
5171 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5172 GFP_NOFS);
5173 ordered = btrfs_lookup_first_ordered_extent(inode,
5174 alloc_end - 1);
5175 if (ordered &&
5176 ordered->file_offset + ordered->len > alloc_start &&
5177 ordered->file_offset < alloc_end) {
5178 btrfs_put_ordered_extent(ordered);
5179 unlock_extent(&BTRFS_I(inode)->io_tree,
5180 alloc_start, locked_end, GFP_NOFS);
5181 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5182
5183 /*
5184 * we can't wait on the range with the transaction
5185 * running or with the extent lock held
5186 */
5187 btrfs_wait_ordered_range(inode, alloc_start,
5188 alloc_end - alloc_start);
5189 } else {
5190 if (ordered)
5191 btrfs_put_ordered_extent(ordered);
5192 break;
5193 }
5194 }
5195
5196 cur_offset = alloc_start;
5197 while (1) {
5198 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5199 alloc_end - cur_offset, 0);
5200 BUG_ON(IS_ERR(em) || !em);
5201 last_byte = min(extent_map_end(em), alloc_end);
5202 last_byte = (last_byte + mask) & ~mask;
5203 if (em->block_start == EXTENT_MAP_HOLE) {
5204 ret = prealloc_file_range(trans, inode, cur_offset,
5205 last_byte, locked_end + 1,
5206 alloc_hint, mode);
5207 if (ret < 0) {
5208 free_extent_map(em);
5209 break;
5210 }
5211 }
5212 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5213 alloc_hint = em->block_start;
5214 free_extent_map(em);
5215
5216 cur_offset = last_byte;
5217 if (cur_offset >= alloc_end) {
5218 ret = 0;
5219 break;
5220 }
5221 }
5222 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5223 GFP_NOFS);
5224
5225 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5226 out_free:
5227 btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
5228 out:
5229 mutex_unlock(&inode->i_mutex);
5230 return ret;
5231 }
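
/*
 * Editor's note (not part of the original file): btrfs_fallocate() above
 * implements the fallocate(2) path, and FALLOC_FL_KEEP_SIZE is what keeps
 * prealloc_file_range() from growing i_size.  A minimal, hedged usage sketch
 * from userspace; the file name and 16 MiB length are illustrative only.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>

int main(void)
{
	struct stat st;
	int fd = open("prealloc-demo", O_RDWR | O_CREAT, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* reserve 16 MiB of extents without changing the visible file size */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) != 0) {
		perror("fallocate");
		return 1;
	}
	if (fstat(fd, &st) == 0)
		printf("st_size=%lld st_blocks=%lld\n",
		       (long long)st.st_size, (long long)st.st_blocks);
	close(fd);
	return 0;
}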
5232
5233 static int btrfs_set_page_dirty(struct page *page)
5234 {
5235 return __set_page_dirty_nobuffers(page);
5236 }
5237
5238 static int btrfs_permission(struct inode *inode, int mask)
5239 {
5240 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
5241 return -EACCES;
5242 return generic_permission(inode, mask, btrfs_check_acl);
5243 }
5244
5245 static struct inode_operations btrfs_dir_inode_operations = {
5246 .getattr = btrfs_getattr,
5247 .lookup = btrfs_lookup,
5248 .create = btrfs_create,
5249 .unlink = btrfs_unlink,
5250 .link = btrfs_link,
5251 .mkdir = btrfs_mkdir,
5252 .rmdir = btrfs_rmdir,
5253 .rename = btrfs_rename,
5254 .symlink = btrfs_symlink,
5255 .setattr = btrfs_setattr,
5256 .mknod = btrfs_mknod,
5257 .setxattr = btrfs_setxattr,
5258 .getxattr = btrfs_getxattr,
5259 .listxattr = btrfs_listxattr,
5260 .removexattr = btrfs_removexattr,
5261 .permission = btrfs_permission,
5262 };
5263 static struct inode_operations btrfs_dir_ro_inode_operations = {
5264 .lookup = btrfs_lookup,
5265 .permission = btrfs_permission,
5266 };
5267 static struct file_operations btrfs_dir_file_operations = {
5268 .llseek = generic_file_llseek,
5269 .read = generic_read_dir,
5270 .readdir = btrfs_real_readdir,
5271 .unlocked_ioctl = btrfs_ioctl,
5272 #ifdef CONFIG_COMPAT
5273 .compat_ioctl = btrfs_ioctl,
5274 #endif
5275 .release = btrfs_release_file,
5276 .fsync = btrfs_sync_file,
5277 };
5278
5279 static struct extent_io_ops btrfs_extent_io_ops = {
5280 .fill_delalloc = run_delalloc_range,
5281 .submit_bio_hook = btrfs_submit_bio_hook,
5282 .merge_bio_hook = btrfs_merge_bio_hook,
5283 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
5284 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
5285 .writepage_start_hook = btrfs_writepage_start_hook,
5286 .readpage_io_failed_hook = btrfs_io_failed_hook,
5287 .set_bit_hook = btrfs_set_bit_hook,
5288 .clear_bit_hook = btrfs_clear_bit_hook,
5289 };
5290
5291 /*
5292 * btrfs doesn't support the bmap operation because swapfiles
5293 * use bmap to make a mapping of extents in the file. They assume
5294 * these extents won't change over the life of the file and they
5295 * use the bmap result to do IO directly to the drive.
5296 *
5297 * the btrfs bmap call would return logical addresses that aren't
5298 * suitable for IO and they also will change frequently as COW
5299 * operations happen. So, swapfile + btrfs == corruption.
5300 *
5301 * For now we're avoiding this by dropping bmap.
5302 */
5303 static struct address_space_operations btrfs_aops = {
5304 .readpage = btrfs_readpage,
5305 .writepage = btrfs_writepage,
5306 .writepages = btrfs_writepages,
5307 .readpages = btrfs_readpages,
5308 .sync_page = block_sync_page,
5309 .direct_IO = btrfs_direct_IO,
5310 .invalidatepage = btrfs_invalidatepage,
5311 .releasepage = btrfs_releasepage,
5312 .set_page_dirty = btrfs_set_page_dirty,
5313 };
5314
5315 static struct address_space_operations btrfs_symlink_aops = {
5316 .readpage = btrfs_readpage,
5317 .writepage = btrfs_writepage,
5318 .invalidatepage = btrfs_invalidatepage,
5319 .releasepage = btrfs_releasepage,
5320 };
5321
5322 static struct inode_operations btrfs_file_inode_operations = {
5323 .truncate = btrfs_truncate,
5324 .getattr = btrfs_getattr,
5325 .setattr = btrfs_setattr,
5326 .setxattr = btrfs_setxattr,
5327 .getxattr = btrfs_getxattr,
5328 .listxattr = btrfs_listxattr,
5329 .removexattr = btrfs_removexattr,
5330 .permission = btrfs_permission,
5331 .fallocate = btrfs_fallocate,
5332 .fiemap = btrfs_fiemap,
5333 };
5334 static struct inode_operations btrfs_special_inode_operations = {
5335 .getattr = btrfs_getattr,
5336 .setattr = btrfs_setattr,
5337 .permission = btrfs_permission,
5338 .setxattr = btrfs_setxattr,
5339 .getxattr = btrfs_getxattr,
5340 .listxattr = btrfs_listxattr,
5341 .removexattr = btrfs_removexattr,
5342 };
5343 static struct inode_operations btrfs_symlink_inode_operations = {
5344 .readlink = generic_readlink,
5345 .follow_link = page_follow_link_light,
5346 .put_link = page_put_link,
5347 .permission = btrfs_permission,
5348 .setxattr = btrfs_setxattr,
5349 .getxattr = btrfs_getxattr,
5350 .listxattr = btrfs_listxattr,
5351 .removexattr = btrfs_removexattr,
5352 };