1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18
19 #include <linux/kernel.h>
20 #include <linux/bio.h>
21 #include <linux/buffer_head.h>
22 #include <linux/file.h>
23 #include <linux/fs.h>
24 #include <linux/pagemap.h>
25 #include <linux/highmem.h>
26 #include <linux/time.h>
27 #include <linux/init.h>
28 #include <linux/string.h>
29 #include <linux/backing-dev.h>
30 #include <linux/mpage.h>
31 #include <linux/swap.h>
32 #include <linux/writeback.h>
33 #include <linux/statfs.h>
34 #include <linux/compat.h>
35 #include <linux/bit_spinlock.h>
36 #include <linux/xattr.h>
37 #include <linux/posix_acl.h>
38 #include <linux/falloc.h>
39 #include "compat.h"
40 #include "ctree.h"
41 #include "disk-io.h"
42 #include "transaction.h"
43 #include "btrfs_inode.h"
44 #include "ioctl.h"
45 #include "print-tree.h"
46 #include "volumes.h"
47 #include "ordered-data.h"
48 #include "xattr.h"
49 #include "tree-log.h"
50 #include "compression.h"
51 #include "locking.h"
52
53 struct btrfs_iget_args {
54 u64 ino;
55 struct btrfs_root *root;
56 };
57
58 static struct inode_operations btrfs_dir_inode_operations;
59 static struct inode_operations btrfs_symlink_inode_operations;
60 static struct inode_operations btrfs_dir_ro_inode_operations;
61 static struct inode_operations btrfs_special_inode_operations;
62 static struct inode_operations btrfs_file_inode_operations;
63 static struct address_space_operations btrfs_aops;
64 static struct address_space_operations btrfs_symlink_aops;
65 static struct file_operations btrfs_dir_file_operations;
66 static struct extent_io_ops btrfs_extent_io_ops;
67
68 static struct kmem_cache *btrfs_inode_cachep;
69 struct kmem_cache *btrfs_trans_handle_cachep;
70 struct kmem_cache *btrfs_transaction_cachep;
71 struct kmem_cache *btrfs_path_cachep;
72
73 #define S_SHIFT 12
74 static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
75 [S_IFREG >> S_SHIFT] = BTRFS_FT_REG_FILE,
76 [S_IFDIR >> S_SHIFT] = BTRFS_FT_DIR,
77 [S_IFCHR >> S_SHIFT] = BTRFS_FT_CHRDEV,
78 [S_IFBLK >> S_SHIFT] = BTRFS_FT_BLKDEV,
79 [S_IFIFO >> S_SHIFT] = BTRFS_FT_FIFO,
80 [S_IFSOCK >> S_SHIFT] = BTRFS_FT_SOCK,
81 [S_IFLNK >> S_SHIFT] = BTRFS_FT_SYMLINK,
82 };
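/*
 * the table above is indexed by the S_IFMT bits of an inode's mode,
 * shifted down by S_SHIFT. e.g. for a regular file:
 *
 *	btrfs_type_by_mode[(S_IFREG & S_IFMT) >> S_SHIFT] == BTRFS_FT_REG_FILE
 *
 * the directory code uses the same expression with inode->i_mode to
 * pick the type stored in a new dir item.
 */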
83
84 static void btrfs_truncate(struct inode *inode);
85 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end);
86 static noinline int cow_file_range(struct inode *inode,
87 struct page *locked_page,
88 u64 start, u64 end, int *page_started,
89 unsigned long *nr_written, int unlock);
90
91 static int btrfs_init_inode_security(struct inode *inode, struct inode *dir)
92 {
93 int err;
94
95 err = btrfs_init_acl(inode, dir);
96 if (!err)
97 err = btrfs_xattr_security_init(inode, dir);
98 return err;
99 }
100
101 /*
102 * this does all the hard work for inserting an inline extent into
103 * the btree. The caller should have called btrfs_drop_extents so that
104 * no overlapping inline items exist in the btree
105 */
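/*
 * note on the item layout (a sketch of what the code below builds):
 * the inserted item is a btrfs_file_extent_item header followed
 * directly by the (possibly compressed) data bytes;
 * btrfs_file_extent_calc_inline_size() sizes the item to cover both.
 */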
106 static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
107 struct btrfs_root *root, struct inode *inode,
108 u64 start, size_t size, size_t compressed_size,
109 struct page **compressed_pages)
110 {
111 struct btrfs_key key;
112 struct btrfs_path *path;
113 struct extent_buffer *leaf;
114 struct page *page = NULL;
115 char *kaddr;
116 unsigned long ptr;
117 struct btrfs_file_extent_item *ei;
118 int err = 0;
119 int ret;
120 size_t cur_size = size;
121 size_t datasize;
122 unsigned long offset;
123 int use_compress = 0;
124
125 if (compressed_size && compressed_pages) {
126 use_compress = 1;
127 cur_size = compressed_size;
128 }
129
130 path = btrfs_alloc_path();
131 if (!path)
132 return -ENOMEM;
133
134 path->leave_spinning = 1;
135 btrfs_set_trans_block_group(trans, inode);
136
137 key.objectid = inode->i_ino;
138 key.offset = start;
139 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
140 datasize = btrfs_file_extent_calc_inline_size(cur_size);
141
142 inode_add_bytes(inode, size);
143 ret = btrfs_insert_empty_item(trans, root, path, &key,
144 datasize);
146 if (ret) {
147 err = ret;
148 goto fail;
149 }
150 leaf = path->nodes[0];
151 ei = btrfs_item_ptr(leaf, path->slots[0],
152 struct btrfs_file_extent_item);
153 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
154 btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
155 btrfs_set_file_extent_encryption(leaf, ei, 0);
156 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
157 btrfs_set_file_extent_ram_bytes(leaf, ei, size);
158 ptr = btrfs_file_extent_inline_start(ei);
159
160 if (use_compress) {
161 struct page *cpage;
162 int i = 0;
163 while (compressed_size > 0) {
164 cpage = compressed_pages[i];
165 cur_size = min_t(unsigned long, compressed_size,
166 PAGE_CACHE_SIZE);
167
168 kaddr = kmap_atomic(cpage, KM_USER0);
169 write_extent_buffer(leaf, kaddr, ptr, cur_size);
170 kunmap_atomic(kaddr, KM_USER0);
171
172 i++;
173 ptr += cur_size;
174 compressed_size -= cur_size;
175 }
176 btrfs_set_file_extent_compression(leaf, ei,
177 BTRFS_COMPRESS_ZLIB);
178 } else {
179 page = find_get_page(inode->i_mapping,
180 start >> PAGE_CACHE_SHIFT);
181 btrfs_set_file_extent_compression(leaf, ei, 0);
182 kaddr = kmap_atomic(page, KM_USER0);
183 offset = start & (PAGE_CACHE_SIZE - 1);
184 write_extent_buffer(leaf, kaddr + offset, ptr, size);
185 kunmap_atomic(kaddr, KM_USER0);
186 page_cache_release(page);
187 }
188 btrfs_mark_buffer_dirty(leaf);
189 btrfs_free_path(path);
190
191 BTRFS_I(inode)->disk_i_size = inode->i_size;
192 btrfs_update_inode(trans, root, inode);
193 return 0;
194 fail:
195 btrfs_free_path(path);
196 return err;
197 }
198
199
200 /*
201 * conditionally insert an inline extent into the file. This
202 * does the checks required to make sure the data is small enough
203 * to fit as an inline extent.
204 */
205 static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
206 struct btrfs_root *root,
207 struct inode *inode, u64 start, u64 end,
208 size_t compressed_size,
209 struct page **compressed_pages)
210 {
211 u64 isize = i_size_read(inode);
212 u64 actual_end = min(end + 1, isize);
213 u64 inline_len = actual_end - start;
214 u64 aligned_end = (end + root->sectorsize - 1) &
215 ~((u64)root->sectorsize - 1);
216 u64 hint_byte;
217 u64 data_len = inline_len;
218 int ret;
219
220 if (compressed_size)
221 data_len = compressed_size;
222
223 if (start > 0 ||
224 actual_end >= PAGE_CACHE_SIZE ||
225 data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
226 (!compressed_size &&
227 (actual_end & (root->sectorsize - 1)) == 0) ||
228 end + 1 < isize ||
229 data_len > root->fs_info->max_inline) {
230 return 1;
231 }
232
233 ret = btrfs_drop_extents(trans, root, inode, start,
234 aligned_end, aligned_end, start,
235 &hint_byte, 1);
236 BUG_ON(ret);
237
238 if (isize > actual_end)
239 inline_len = min_t(u64, isize, actual_end);
240 ret = insert_inline_extent(trans, root, inode, start,
241 inline_len, compressed_size,
242 compressed_pages);
243 BUG_ON(ret);
244 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
245 return 0;
246 }
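/*
 * worked example of the checks above (a sketch, assuming 4K pages and
 * sectors): a 3000 byte file written from offset 0 may be inlined --
 * start == 0, actual_end < PAGE_CACHE_SIZE, the end is not sector
 * aligned and 3000 is under the inline limits. a write starting at
 * offset 8192 always returns 1, and the caller falls back to a
 * regular extent, because start > 0.
 */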
247
248 struct async_extent {
249 u64 start;
250 u64 ram_size;
251 u64 compressed_size;
252 struct page **pages;
253 unsigned long nr_pages;
254 struct list_head list;
255 };
256
257 struct async_cow {
258 struct inode *inode;
259 struct btrfs_root *root;
260 struct page *locked_page;
261 u64 start;
262 u64 end;
263 struct list_head extents;
264 struct btrfs_work work;
265 };
266
267 static noinline int add_async_extent(struct async_cow *cow,
268 u64 start, u64 ram_size,
269 u64 compressed_size,
270 struct page **pages,
271 unsigned long nr_pages)
272 {
273 struct async_extent *async_extent;
274
275 async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
276 async_extent->start = start;
277 async_extent->ram_size = ram_size;
278 async_extent->compressed_size = compressed_size;
279 async_extent->pages = pages;
280 async_extent->nr_pages = nr_pages;
281 list_add_tail(&async_extent->list, &cow->extents);
282 return 0;
283 }
284
285 /*
286 * we create compressed extents in two phases. The first
287 * phase compresses a range of pages that have already been
288 * locked (both pages and state bits are locked).
289 *
290 * This is done inside an ordered work queue, and the compression
291 * is spread across many cpus. The actual IO submission is step
292 * two, and the ordered work queue takes care of making sure that
293 * happens in the same order things were put onto the queue by
294 * writepages and friends.
295 *
296 * If this code finds it can't get good compression, it puts an
297 * entry onto the work queue to write the uncompressed bytes. This
298 * makes sure that both compressed inodes and uncompressed inodes
299 * are written in the same order that pdflush sent them down.
300 */
301 static noinline int compress_file_range(struct inode *inode,
302 struct page *locked_page,
303 u64 start, u64 end,
304 struct async_cow *async_cow,
305 int *num_added)
306 {
307 struct btrfs_root *root = BTRFS_I(inode)->root;
308 struct btrfs_trans_handle *trans;
309 u64 num_bytes;
310 u64 orig_start;
311 u64 disk_num_bytes;
312 u64 blocksize = root->sectorsize;
313 u64 actual_end;
314 u64 isize = i_size_read(inode);
315 int ret = 0;
316 struct page **pages = NULL;
317 unsigned long nr_pages;
318 unsigned long nr_pages_ret = 0;
319 unsigned long total_compressed = 0;
320 unsigned long total_in = 0;
321 unsigned long max_compressed = 128 * 1024;
322 unsigned long max_uncompressed = 128 * 1024;
323 int i;
324 int will_compress;
325
326 orig_start = start;
327
328 actual_end = min_t(u64, isize, end + 1);
329 again:
330 will_compress = 0;
331 nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1;
332 nr_pages = min(nr_pages, (128 * 1024UL) / PAGE_CACHE_SIZE);
333
334 /*
335 * we don't want to send crud past the end of i_size through
336 * compression, that's just a waste of CPU time. So, if the
337 * end of the file is before the start of our current
338 * requested range of bytes, we bail out to the uncompressed
339 * cleanup code that can deal with all of this.
340 *
341 * It isn't really the fastest way to fix things, but this is a
342 * very uncommon corner.
343 */
344 if (actual_end <= start)
345 goto cleanup_and_bail_uncompressed;
346
347 total_compressed = actual_end - start;
348
349 /* we want to make sure that amount of ram required to uncompress
350 * an extent is reasonable, so we limit the total size in ram
351 * of a compressed extent to 128k. This is a crucial number
352 * because it also controls how easily we can spread reads across
353 * cpus for decompression.
354 *
355 * We also want to make sure the amount of IO required to do
356 * a random read is reasonably small, so we limit the size of
357 * a compressed extent to 128k.
358 */
359 total_compressed = min(total_compressed, max_uncompressed);
360 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
361 num_bytes = max(blocksize, num_bytes);
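/*
 * e.g. with a 4K blocksize, start == 0 and end == 10000 (the range is
 * inclusive), (end - start + blocksize) & ~(blocksize - 1) gives
 * num_bytes == 12288: three whole blocks covering bytes 0-10000.
 */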
362 disk_num_bytes = num_bytes;
363 total_in = 0;
364 ret = 0;
365
366 /*
367 * we do compression for mount -o compress and when the
368 * inode has not been flagged as nocompress. This flag can
369 * change at any time if we discover bad compression ratios.
370 */
371 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS) &&
372 btrfs_test_opt(root, COMPRESS)) {
373 WARN_ON(pages);
374 pages = kzalloc(sizeof(struct page *) * nr_pages, GFP_NOFS);
375
376 ret = btrfs_zlib_compress_pages(inode->i_mapping, start,
377 total_compressed, pages,
378 nr_pages, &nr_pages_ret,
379 &total_in,
380 &total_compressed,
381 max_compressed);
382
383 if (!ret) {
384 unsigned long offset = total_compressed &
385 (PAGE_CACHE_SIZE - 1);
386 struct page *page = pages[nr_pages_ret - 1];
387 char *kaddr;
388
389 /* zero the tail end of the last page, we might be
390 * sending it down to disk
391 */
392 if (offset) {
393 kaddr = kmap_atomic(page, KM_USER0);
394 memset(kaddr + offset, 0,
395 PAGE_CACHE_SIZE - offset);
396 kunmap_atomic(kaddr, KM_USER0);
397 }
398 will_compress = 1;
399 }
400 }
401 if (start == 0) {
402 trans = btrfs_join_transaction(root, 1);
403 BUG_ON(!trans);
404 btrfs_set_trans_block_group(trans, inode);
405
406 /* lets try to make an inline extent */
407 if (ret || total_in < (actual_end - start)) {
408 /* we didn't compress the entire range, try
409 * to make an uncompressed inline extent.
410 */
411 ret = cow_file_range_inline(trans, root, inode,
412 start, end, 0, NULL);
413 } else {
414 /* try making a compressed inline extent */
415 ret = cow_file_range_inline(trans, root, inode,
416 start, end,
417 total_compressed, pages);
418 }
419 btrfs_end_transaction(trans, root);
420 if (ret == 0) {
421 /*
422 * inline extent creation worked, we don't need
423 * to create any more async work items. Unlock
424 * and free up our temp pages.
425 */
426 extent_clear_unlock_delalloc(inode,
427 &BTRFS_I(inode)->io_tree,
428 start, end, NULL,
429 EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
430 EXTENT_CLEAR_DELALLOC |
431 EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK);
432 ret = 0;
433 goto free_pages_out;
434 }
435 }
436
437 if (will_compress) {
438 /*
439 * we aren't doing an inline extent, so round the compressed size
440 * up to a block size boundary so that the allocator does sane
441 * things
442 */
443 total_compressed = (total_compressed + blocksize - 1) &
444 ~(blocksize - 1);
445
446 /*
447 * one last check to make sure the compression is really a
448 * win, compare the page count read with the blocks on disk
449 */
450 total_in = (total_in + PAGE_CACHE_SIZE - 1) &
451 ~(PAGE_CACHE_SIZE - 1);
452 if (total_compressed >= total_in) {
453 will_compress = 0;
454 } else {
455 disk_num_bytes = total_compressed;
456 num_bytes = total_in;
457 }
458 }
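/*
 * e.g. (with 4K blocks) 128K of input that compresses to 120K is
 * kept, but input that only shrinks to 127K rounds up to a full 128K
 * of disk blocks and will be written uncompressed instead.
 */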
459 if (!will_compress && pages) {
460 /*
461 * the compression code ran but failed to make things smaller,
462 * free any pages it allocated and our page pointer array
463 */
464 for (i = 0; i < nr_pages_ret; i++) {
465 WARN_ON(pages[i]->mapping);
466 page_cache_release(pages[i]);
467 }
468 kfree(pages);
469 pages = NULL;
470 total_compressed = 0;
471 nr_pages_ret = 0;
472
473 /* flag the file so we don't compress in the future */
474 BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
475 }
476 if (will_compress) {
477 *num_added += 1;
478
479 /* the async work queues will take care of doing actual
480 * allocation on disk for these compressed pages,
481 * and will submit them to the elevator.
482 */
483 add_async_extent(async_cow, start, num_bytes,
484 total_compressed, pages, nr_pages_ret);
485
486 if (start + num_bytes < end && start + num_bytes < actual_end) {
487 start += num_bytes;
488 pages = NULL;
489 cond_resched();
490 goto again;
491 }
492 } else {
493 cleanup_and_bail_uncompressed:
494 /*
495 * No compression, but we still need to write the pages in
496 * the file we've been given so far. Redirty the locked
497 * page if it corresponds to our extent and set things up
498 * for the async work queue to run cow_file_range to do
499 * the normal delalloc dance
500 */
501 if (page_offset(locked_page) >= start &&
502 page_offset(locked_page) <= end) {
503 __set_page_dirty_nobuffers(locked_page);
504 /* unlocked later on in the async handlers */
505 }
506 add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0);
507 *num_added += 1;
508 }
509
510 out:
511 return 0;
512
513 free_pages_out:
514 for (i = 0; i < nr_pages_ret; i++) {
515 WARN_ON(pages[i]->mapping);
516 page_cache_release(pages[i]);
517 }
518 kfree(pages);
519
520 goto out;
521 }
522
523 /*
524 * phase two of compressed writeback. This is the ordered portion
525 * of the code, which only gets called in the order the work was
526 * queued. We walk all the async extents created by compress_file_range
527 * and send them down to the disk.
528 */
529 static noinline int submit_compressed_extents(struct inode *inode,
530 struct async_cow *async_cow)
531 {
532 struct async_extent *async_extent;
533 u64 alloc_hint = 0;
534 struct btrfs_trans_handle *trans;
535 struct btrfs_key ins;
536 struct extent_map *em;
537 struct btrfs_root *root = BTRFS_I(inode)->root;
538 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
539 struct extent_io_tree *io_tree;
540 int ret;
541
542 if (list_empty(&async_cow->extents))
543 return 0;
544
545 trans = btrfs_join_transaction(root, 1);
546
547 while (!list_empty(&async_cow->extents)) {
548 async_extent = list_entry(async_cow->extents.next,
549 struct async_extent, list);
550 list_del(&async_extent->list);
551
552 io_tree = &BTRFS_I(inode)->io_tree;
553
554 /* did the compression code fall back to uncompressed IO? */
555 if (!async_extent->pages) {
556 int page_started = 0;
557 unsigned long nr_written = 0;
558
559 lock_extent(io_tree, async_extent->start,
560 async_extent->start +
561 async_extent->ram_size - 1, GFP_NOFS);
562
563 /* allocate blocks */
564 cow_file_range(inode, async_cow->locked_page,
565 async_extent->start,
566 async_extent->start +
567 async_extent->ram_size - 1,
568 &page_started, &nr_written, 0);
569
570 /*
571 * if page_started, cow_file_range inserted an
572 * inline extent and took care of all the unlocking
573 * and IO for us. Otherwise, we need to submit
574 * all those pages down to the drive.
575 */
576 if (!page_started)
577 extent_write_locked_range(io_tree,
578 inode, async_extent->start,
579 async_extent->start +
580 async_extent->ram_size - 1,
581 btrfs_get_extent,
582 WB_SYNC_ALL);
583 kfree(async_extent);
584 cond_resched();
585 continue;
586 }
587
588 lock_extent(io_tree, async_extent->start,
589 async_extent->start + async_extent->ram_size - 1,
590 GFP_NOFS);
591 /*
592 * here we're doing allocation and writeback of the
593 * compressed pages
594 */
595 btrfs_drop_extent_cache(inode, async_extent->start,
596 async_extent->start +
597 async_extent->ram_size - 1, 0);
598
599 ret = btrfs_reserve_extent(trans, root,
600 async_extent->compressed_size,
601 async_extent->compressed_size,
602 0, alloc_hint,
603 (u64)-1, &ins, 1);
604 BUG_ON(ret);
605 em = alloc_extent_map(GFP_NOFS);
606 em->start = async_extent->start;
607 em->len = async_extent->ram_size;
608 em->orig_start = em->start;
609
610 em->block_start = ins.objectid;
611 em->block_len = ins.offset;
612 em->bdev = root->fs_info->fs_devices->latest_bdev;
613 set_bit(EXTENT_FLAG_PINNED, &em->flags);
614 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
615
616 while (1) {
617 write_lock(&em_tree->lock);
618 ret = add_extent_mapping(em_tree, em);
619 write_unlock(&em_tree->lock);
620 if (ret != -EEXIST) {
621 free_extent_map(em);
622 break;
623 }
624 btrfs_drop_extent_cache(inode, async_extent->start,
625 async_extent->start +
626 async_extent->ram_size - 1, 0);
627 }
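/*
 * add_extent_mapping() returns -EEXIST while any old extent map still
 * overlaps the range, so keep dropping the cached range and retrying
 * until the new mapping goes in.
 */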
628
629 ret = btrfs_add_ordered_extent(inode, async_extent->start,
630 ins.objectid,
631 async_extent->ram_size,
632 ins.offset,
633 BTRFS_ORDERED_COMPRESSED);
634 BUG_ON(ret);
635
636 btrfs_end_transaction(trans, root);
637
638 /*
639 * clear dirty, set writeback and unlock the pages.
640 */
641 extent_clear_unlock_delalloc(inode,
642 &BTRFS_I(inode)->io_tree,
643 async_extent->start,
644 async_extent->start +
645 async_extent->ram_size - 1,
646 NULL, EXTENT_CLEAR_UNLOCK_PAGE |
647 EXTENT_CLEAR_UNLOCK |
648 EXTENT_CLEAR_DELALLOC |
649 EXTENT_CLEAR_DIRTY | EXTENT_SET_WRITEBACK);
650
651 ret = btrfs_submit_compressed_write(inode,
652 async_extent->start,
653 async_extent->ram_size,
654 ins.objectid,
655 ins.offset, async_extent->pages,
656 async_extent->nr_pages);
657
658 BUG_ON(ret);
659 trans = btrfs_join_transaction(root, 1);
660 alloc_hint = ins.objectid + ins.offset;
661 kfree(async_extent);
662 cond_resched();
663 }
664
665 btrfs_end_transaction(trans, root);
666 return 0;
667 }
668
669 /*
670 * when extent_io.c finds a delayed allocation range in the file,
671 * the call backs end up in this code. The basic idea is to
672 * allocate extents on disk for the range, and create ordered data structs
673 * in ram to track those extents.
674 *
675 * locked_page is the page that writepage had locked already. We use
676 * it to make sure we don't do extra locks or unlocks.
677 *
678 * *page_started is set to one if we unlock locked_page and do everything
679 * required to start IO on it. It may be clean and already done with
680 * IO when we return.
681 */
682 static noinline int cow_file_range(struct inode *inode,
683 struct page *locked_page,
684 u64 start, u64 end, int *page_started,
685 unsigned long *nr_written,
686 int unlock)
687 {
688 struct btrfs_root *root = BTRFS_I(inode)->root;
689 struct btrfs_trans_handle *trans;
690 u64 alloc_hint = 0;
691 u64 num_bytes;
692 unsigned long ram_size;
693 u64 disk_num_bytes;
694 u64 cur_alloc_size;
695 u64 blocksize = root->sectorsize;
696 u64 actual_end;
697 u64 isize = i_size_read(inode);
698 struct btrfs_key ins;
699 struct extent_map *em;
700 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
701 int ret = 0;
702
703 trans = btrfs_join_transaction(root, 1);
704 BUG_ON(!trans);
705 btrfs_set_trans_block_group(trans, inode);
706
707 actual_end = min_t(u64, isize, end + 1);
708
709 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
710 num_bytes = max(blocksize, num_bytes);
711 disk_num_bytes = num_bytes;
712 ret = 0;
713
714 if (start == 0) {
715 /* lets try to make an inline extent */
716 ret = cow_file_range_inline(trans, root, inode,
717 start, end, 0, NULL);
718 if (ret == 0) {
719 extent_clear_unlock_delalloc(inode,
720 &BTRFS_I(inode)->io_tree,
721 start, end, NULL,
722 EXTENT_CLEAR_UNLOCK_PAGE |
723 EXTENT_CLEAR_UNLOCK |
724 EXTENT_CLEAR_DELALLOC |
725 EXTENT_CLEAR_DIRTY |
726 EXTENT_SET_WRITEBACK |
727 EXTENT_END_WRITEBACK);
728 *nr_written = *nr_written +
729 (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE;
730 *page_started = 1;
731 ret = 0;
732 goto out;
733 }
734 }
735
736 BUG_ON(disk_num_bytes >
737 btrfs_super_total_bytes(&root->fs_info->super_copy));
738
739
740 read_lock(&BTRFS_I(inode)->extent_tree.lock);
741 em = search_extent_mapping(&BTRFS_I(inode)->extent_tree,
742 start, num_bytes);
743 if (em) {
744 alloc_hint = em->block_start;
745 free_extent_map(em);
746 }
747 read_unlock(&BTRFS_I(inode)->extent_tree.lock);
748 btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0);
749
750 while (disk_num_bytes > 0) {
751 unsigned long op;
752
753 cur_alloc_size = min(disk_num_bytes, root->fs_info->max_extent);
754 ret = btrfs_reserve_extent(trans, root, cur_alloc_size,
755 root->sectorsize, 0, alloc_hint,
756 (u64)-1, &ins, 1);
757 BUG_ON(ret);
758
759 em = alloc_extent_map(GFP_NOFS);
760 em->start = start;
761 em->orig_start = em->start;
762 ram_size = ins.offset;
763 em->len = ins.offset;
764
765 em->block_start = ins.objectid;
766 em->block_len = ins.offset;
767 em->bdev = root->fs_info->fs_devices->latest_bdev;
768 set_bit(EXTENT_FLAG_PINNED, &em->flags);
769
770 while (1) {
771 write_lock(&em_tree->lock);
772 ret = add_extent_mapping(em_tree, em);
773 write_unlock(&em_tree->lock);
774 if (ret != -EEXIST) {
775 free_extent_map(em);
776 break;
777 }
778 btrfs_drop_extent_cache(inode, start,
779 start + ram_size - 1, 0);
780 }
781
782 cur_alloc_size = ins.offset;
783 ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
784 ram_size, cur_alloc_size, 0);
785 BUG_ON(ret);
786
787 if (root->root_key.objectid ==
788 BTRFS_DATA_RELOC_TREE_OBJECTID) {
789 ret = btrfs_reloc_clone_csums(inode, start,
790 cur_alloc_size);
791 BUG_ON(ret);
792 }
793
794 if (disk_num_bytes < cur_alloc_size)
795 break;
796
797 /* we're not doing compressed IO, don't unlock the first
798 * page (which the caller expects to stay locked), don't
799 * clear any dirty bits and don't set any writeback bits
800 *
801 * Do set the Private2 bit so we know this page was properly
802 * setup for writepage
803 */
804 op = unlock ? EXTENT_CLEAR_UNLOCK_PAGE : 0;
805 op |= EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
806 EXTENT_SET_PRIVATE2;
807
808 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
809 start, start + ram_size - 1,
810 locked_page, op);
811 disk_num_bytes -= cur_alloc_size;
812 num_bytes -= cur_alloc_size;
813 alloc_hint = ins.objectid + ins.offset;
814 start += cur_alloc_size;
815 }
816 out:
817 ret = 0;
818 btrfs_end_transaction(trans, root);
819
820 return ret;
821 }
822
823 /*
824 * work queue callback to start compression on a file's pages
825 */
826 static noinline void async_cow_start(struct btrfs_work *work)
827 {
828 struct async_cow *async_cow;
829 int num_added = 0;
830 async_cow = container_of(work, struct async_cow, work);
831
832 compress_file_range(async_cow->inode, async_cow->locked_page,
833 async_cow->start, async_cow->end, async_cow,
834 &num_added);
835 if (num_added == 0)
836 async_cow->inode = NULL;
837 }
838
839 /*
840 * work queue call back to submit previously compressed pages
841 */
842 static noinline void async_cow_submit(struct btrfs_work *work)
843 {
844 struct async_cow *async_cow;
845 struct btrfs_root *root;
846 unsigned long nr_pages;
847
848 async_cow = container_of(work, struct async_cow, work);
849
850 root = async_cow->root;
851 nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >>
852 PAGE_CACHE_SHIFT;
853
854 atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages);
855
856 if (atomic_read(&root->fs_info->async_delalloc_pages) <
857 5 * 1024 * 1024 &&
858 waitqueue_active(&root->fs_info->async_submit_wait))
859 wake_up(&root->fs_info->async_submit_wait);
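/*
 * writers throttled in cow_file_range_async() sleep on
 * async_submit_wait; wake them now that the count of in-flight
 * delalloc pages has dropped back down.
 */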
860
861 if (async_cow->inode)
862 submit_compressed_extents(async_cow->inode, async_cow);
863 }
864
865 static noinline void async_cow_free(struct btrfs_work *work)
866 {
867 struct async_cow *async_cow;
868 async_cow = container_of(work, struct async_cow, work);
869 kfree(async_cow);
870 }
871
872 static int cow_file_range_async(struct inode *inode, struct page *locked_page,
873 u64 start, u64 end, int *page_started,
874 unsigned long *nr_written)
875 {
876 struct async_cow *async_cow;
877 struct btrfs_root *root = BTRFS_I(inode)->root;
878 unsigned long nr_pages;
879 u64 cur_end;
880 int limit = 10 * 1024 * 1024;
881
882 clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
883 1, 0, NULL, GFP_NOFS);
884 while (start < end) {
885 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
886 async_cow->inode = inode;
887 async_cow->root = root;
888 async_cow->locked_page = locked_page;
889 async_cow->start = start;
890
891 if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
892 cur_end = end;
893 else
894 cur_end = min(end, start + 512 * 1024 - 1);
895
896 async_cow->end = cur_end;
897 INIT_LIST_HEAD(&async_cow->extents);
898
899 async_cow->work.func = async_cow_start;
900 async_cow->work.ordered_func = async_cow_submit;
901 async_cow->work.ordered_free = async_cow_free;
902 async_cow->work.flags = 0;
903
904 nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >>
905 PAGE_CACHE_SHIFT;
906 atomic_add(nr_pages, &root->fs_info->async_delalloc_pages);
907
908 btrfs_queue_worker(&root->fs_info->delalloc_workers,
909 &async_cow->work);
910
911 if (atomic_read(&root->fs_info->async_delalloc_pages) > limit) {
912 wait_event(root->fs_info->async_submit_wait,
913 (atomic_read(&root->fs_info->async_delalloc_pages) <
914 limit));
915 }
916
917 while (atomic_read(&root->fs_info->async_submit_draining) &&
918 atomic_read(&root->fs_info->async_delalloc_pages)) {
919 wait_event(root->fs_info->async_submit_wait,
920 (atomic_read(&root->fs_info->async_delalloc_pages) ==
921 0));
922 }
923
924 *nr_written += nr_pages;
925 start = cur_end + 1;
926 }
927 *page_started = 1;
928 return 0;
929 }
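/*
 * e.g. a 2MB dirty range on a compressible inode is split into four
 * 512K async_cow units; compression then runs on several workers at
 * once while the ordered_func callbacks still submit the results in
 * file order.
 */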
930
931 static noinline int csum_exist_in_range(struct btrfs_root *root,
932 u64 bytenr, u64 num_bytes)
933 {
934 int ret;
935 struct btrfs_ordered_sum *sums;
936 LIST_HEAD(list);
937
938 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr,
939 bytenr + num_bytes - 1, &list);
940 if (ret == 0 && list_empty(&list))
941 return 0;
942
943 while (!list_empty(&list)) {
944 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
945 list_del(&sums->list);
946 kfree(sums);
947 }
948 return 1;
949 }
950
951 /*
952 * called during nocow writeback. This checks for snapshots or COW copies
953 * of the extents that exist in the file, and COWs the file as required.
954 *
955 * If no cow copies or snapshots exist, we write directly to the existing
956 * blocks on disk
957 */
958 static noinline int run_delalloc_nocow(struct inode *inode,
959 struct page *locked_page,
960 u64 start, u64 end, int *page_started, int force,
961 unsigned long *nr_written)
962 {
963 struct btrfs_root *root = BTRFS_I(inode)->root;
964 struct btrfs_trans_handle *trans;
965 struct extent_buffer *leaf;
966 struct btrfs_path *path;
967 struct btrfs_file_extent_item *fi;
968 struct btrfs_key found_key;
969 u64 cow_start;
970 u64 cur_offset;
971 u64 extent_end;
972 u64 extent_offset;
973 u64 disk_bytenr;
974 u64 num_bytes;
975 int extent_type;
976 int ret;
977 int type;
978 int nocow;
979 int check_prev = 1;
980
981 path = btrfs_alloc_path();
982 BUG_ON(!path);
983 trans = btrfs_join_transaction(root, 1);
984 BUG_ON(!trans);
985
986 cow_start = (u64)-1;
987 cur_offset = start;
988 while (1) {
989 ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
990 cur_offset, 0);
991 BUG_ON(ret < 0);
992 if (ret > 0 && path->slots[0] > 0 && check_prev) {
993 leaf = path->nodes[0];
994 btrfs_item_key_to_cpu(leaf, &found_key,
995 path->slots[0] - 1);
996 if (found_key.objectid == inode->i_ino &&
997 found_key.type == BTRFS_EXTENT_DATA_KEY)
998 path->slots[0]--;
999 }
1000 check_prev = 0;
1001 next_slot:
1002 leaf = path->nodes[0];
1003 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
1004 ret = btrfs_next_leaf(root, path);
1005 BUG_ON(ret < 0);
1007 if (ret > 0)
1008 break;
1009 leaf = path->nodes[0];
1010 }
1011
1012 nocow = 0;
1013 disk_bytenr = 0;
1014 num_bytes = 0;
1015 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1016
1017 if (found_key.objectid > inode->i_ino ||
1018 found_key.type > BTRFS_EXTENT_DATA_KEY ||
1019 found_key.offset > end)
1020 break;
1021
1022 if (found_key.offset > cur_offset) {
1023 extent_end = found_key.offset;
1024 goto out_check;
1025 }
1026
1027 fi = btrfs_item_ptr(leaf, path->slots[0],
1028 struct btrfs_file_extent_item);
1029 extent_type = btrfs_file_extent_type(leaf, fi);
1030
1031 if (extent_type == BTRFS_FILE_EXTENT_REG ||
1032 extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1033 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
1034 extent_offset = btrfs_file_extent_offset(leaf, fi);
1035 extent_end = found_key.offset +
1036 btrfs_file_extent_num_bytes(leaf, fi);
1037 if (extent_end <= start) {
1038 path->slots[0]++;
1039 goto next_slot;
1040 }
1041 if (disk_bytenr == 0)
1042 goto out_check;
1043 if (btrfs_file_extent_compression(leaf, fi) ||
1044 btrfs_file_extent_encryption(leaf, fi) ||
1045 btrfs_file_extent_other_encoding(leaf, fi))
1046 goto out_check;
1047 if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
1048 goto out_check;
1049 if (btrfs_extent_readonly(root, disk_bytenr))
1050 goto out_check;
1051 if (btrfs_cross_ref_exist(trans, root, inode->i_ino,
1052 found_key.offset -
1053 extent_offset, disk_bytenr))
1054 goto out_check;
1055 disk_bytenr += extent_offset;
1056 disk_bytenr += cur_offset - found_key.offset;
1057 num_bytes = min(end + 1, extent_end) - cur_offset;
1058 /*
1059 * force cow if csum exists in the range.
1060 * this ensures that the csums for a given extent are
1061 * either valid or do not exist.
1062 */
1063 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
1064 goto out_check;
1065 nocow = 1;
1066 } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
1067 extent_end = found_key.offset +
1068 btrfs_file_extent_inline_len(leaf, fi);
1069 extent_end = ALIGN(extent_end, root->sectorsize);
1070 } else {
1071 BUG_ON(1);
1072 }
1073 out_check:
1074 if (extent_end <= start) {
1075 path->slots[0]++;
1076 goto next_slot;
1077 }
1078 if (!nocow) {
1079 if (cow_start == (u64)-1)
1080 cow_start = cur_offset;
1081 cur_offset = extent_end;
1082 if (cur_offset > end)
1083 break;
1084 path->slots[0]++;
1085 goto next_slot;
1086 }
1087
1088 btrfs_release_path(root, path);
1089 if (cow_start != (u64)-1) {
1090 ret = cow_file_range(inode, locked_page, cow_start,
1091 found_key.offset - 1, page_started,
1092 nr_written, 1);
1093 BUG_ON(ret);
1094 cow_start = (u64)-1;
1095 }
1096
1097 if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
1098 struct extent_map *em;
1099 struct extent_map_tree *em_tree;
1100 em_tree = &BTRFS_I(inode)->extent_tree;
1101 em = alloc_extent_map(GFP_NOFS);
1102 em->start = cur_offset;
1103 em->orig_start = em->start;
1104 em->len = num_bytes;
1105 em->block_len = num_bytes;
1106 em->block_start = disk_bytenr;
1107 em->bdev = root->fs_info->fs_devices->latest_bdev;
1108 set_bit(EXTENT_FLAG_PINNED, &em->flags);
1109 while (1) {
1110 write_lock(&em_tree->lock);
1111 ret = add_extent_mapping(em_tree, em);
1112 write_unlock(&em_tree->lock);
1113 if (ret != -EEXIST) {
1114 free_extent_map(em);
1115 break;
1116 }
1117 btrfs_drop_extent_cache(inode, em->start,
1118 em->start + em->len - 1, 0);
1119 }
1120 type = BTRFS_ORDERED_PREALLOC;
1121 } else {
1122 type = BTRFS_ORDERED_NOCOW;
1123 }
1124
1125 ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
1126 num_bytes, num_bytes, type);
1127 BUG_ON(ret);
1128
1129 extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
1130 cur_offset, cur_offset + num_bytes - 1,
1131 locked_page, EXTENT_CLEAR_UNLOCK_PAGE |
1132 EXTENT_CLEAR_UNLOCK | EXTENT_CLEAR_DELALLOC |
1133 EXTENT_SET_PRIVATE2);
1134 cur_offset = extent_end;
1135 if (cur_offset > end)
1136 break;
1137 }
1138 btrfs_release_path(root, path);
1139
1140 if (cur_offset <= end && cow_start == (u64)-1)
1141 cow_start = cur_offset;
1142 if (cow_start != (u64)-1) {
1143 ret = cow_file_range(inode, locked_page, cow_start, end,
1144 page_started, nr_written, 1);
1145 BUG_ON(ret);
1146 }
1147
1148 ret = btrfs_end_transaction(trans, root);
1149 BUG_ON(ret);
1150 btrfs_free_path(path);
1151 return 0;
1152 }
1153
1154 /*
1155 * extent_io.c call back to do delayed allocation processing
1156 */
1157 static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1158 u64 start, u64 end, int *page_started,
1159 unsigned long *nr_written)
1160 {
1161 int ret;
1162 struct btrfs_root *root = BTRFS_I(inode)->root;
1163
1164 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)
1165 ret = run_delalloc_nocow(inode, locked_page, start, end,
1166 page_started, 1, nr_written);
1167 else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)
1168 ret = run_delalloc_nocow(inode, locked_page, start, end,
1169 page_started, 0, nr_written);
1170 else if (!btrfs_test_opt(root, COMPRESS))
1171 ret = cow_file_range(inode, locked_page, start, end,
1172 page_started, nr_written, 1);
1173 else
1174 ret = cow_file_range_async(inode, locked_page, start, end,
1175 page_started, nr_written);
1176 return ret;
1177 }
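/*
 * summary of the dispatch above: NODATACOW inodes try to reuse any
 * existing extent (force=1), PREALLOC inodes only reuse preallocated
 * extents (force=0), and everything else is either COWed
 * synchronously by cow_file_range or handed to the async compression
 * path.
 */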
1178
1179 static int btrfs_split_extent_hook(struct inode *inode,
1180 struct extent_state *orig, u64 split)
1181 {
1182 struct btrfs_root *root = BTRFS_I(inode)->root;
1183 u64 size;
1184
1185 if (!(orig->state & EXTENT_DELALLOC))
1186 return 0;
1187
1188 size = orig->end - orig->start + 1;
1189 if (size > root->fs_info->max_extent) {
1190 u64 num_extents;
1191 u64 new_size;
1192
1193 new_size = orig->end - split + 1;
1194 num_extents = div64_u64(size + root->fs_info->max_extent - 1,
1195 root->fs_info->max_extent);
1196
1197 /*
1198 * if we break a large extent up, leave delalloc_extents alone,
1199 * since we've already accounted for the large extent.
1200 */
1201 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1202 root->fs_info->max_extent) < num_extents)
1203 return 0;
1204 }
1205
1206 BTRFS_I(inode)->delalloc_extents++;
1207
1208 return 0;
1209 }
1210
1211 /*
1212 * extent_io.c merge_extent_hook, used to track merged delayed allocation
1213 * extents so we can keep track of new extents that are just merged onto old
1214 * extents, such as when we are doing sequential writes, so we can properly
1215 * account for the metadata space we'll need.
1216 */
1217 static int btrfs_merge_extent_hook(struct inode *inode,
1218 struct extent_state *new,
1219 struct extent_state *other)
1220 {
1221 struct btrfs_root *root = BTRFS_I(inode)->root;
1222 u64 new_size, old_size;
1223 u64 num_extents;
1224
1225 /* not delalloc, ignore it */
1226 if (!(other->state & EXTENT_DELALLOC))
1227 return 0;
1228
1229 old_size = other->end - other->start + 1;
1230 if (new->start < other->start)
1231 new_size = other->end - new->start + 1;
1232 else
1233 new_size = new->end - other->start + 1;
1234
1235 /* we're not bigger than the max, unreserve the space and go */
1236 if (new_size <= root->fs_info->max_extent) {
1237 BTRFS_I(inode)->delalloc_extents--;
1238 return 0;
1239 }
1240
1241 /*
1242 * If we grew by another max_extent, just return, we want to keep that
1243 * reserved amount.
1244 */
1245 num_extents = div64_u64(old_size + root->fs_info->max_extent - 1,
1246 root->fs_info->max_extent);
1247 if (div64_u64(new_size + root->fs_info->max_extent - 1,
1248 root->fs_info->max_extent) > num_extents)
1249 return 0;
1250
1251 BTRFS_I(inode)->delalloc_extents--;
1252
1253 return 0;
1254 }
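/*
 * worked example (a sketch, assuming max_extent is 128K): merging 4K
 * into a 64K delalloc neighbour gives 68K, still one max_extent, so
 * one reservation is released above. growing 120K to 132K now spans
 * two max_extents, so the extra reservation is kept.
 */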
1255
1256 /*
1257 * extent_io.c set_bit_hook, used to track delayed allocation
1258 * bytes in this file, and to maintain the list of inodes that
1259 * have pending delalloc work to be done.
1260 */
1261 static int btrfs_set_bit_hook(struct inode *inode, u64 start, u64 end,
1262 unsigned long old, unsigned long bits)
1263 {
1264
1265 /*
1266 * set_bit and clear_bit hooks normally require _irqsave/restore,
1267 * but in this case we are only testing for the DELALLOC
1268 * bit, which is only set or cleared with irqs on
1269 */
1270 if (!(old & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1271 struct btrfs_root *root = BTRFS_I(inode)->root;
1272
1273 BTRFS_I(inode)->delalloc_extents++;
1274 btrfs_delalloc_reserve_space(root, inode, end - start + 1);
1275 spin_lock(&root->fs_info->delalloc_lock);
1276 BTRFS_I(inode)->delalloc_bytes += end - start + 1;
1277 root->fs_info->delalloc_bytes += end - start + 1;
1278 if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1279 list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
1280 &root->fs_info->delalloc_inodes);
1281 }
1282 spin_unlock(&root->fs_info->delalloc_lock);
1283 }
1284 return 0;
1285 }
1286
1287 /*
1288 * extent_io.c clear_bit_hook, see set_bit_hook for why
1289 */
1290 static int btrfs_clear_bit_hook(struct inode *inode,
1291 struct extent_state *state, unsigned long bits)
1292 {
1293 /*
1294 * set_bit and clear_bit hooks normally require _irqsave/restore,
1295 * but in this case we are only testing for the DELALLOC
1296 * bit, which is only set or cleared with irqs on
1297 */
1298 if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
1299 struct btrfs_root *root = BTRFS_I(inode)->root;
1300
1301 BTRFS_I(inode)->delalloc_extents--;
1302 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
1303
1304 spin_lock(&root->fs_info->delalloc_lock);
1305 if (state->end - state->start + 1 >
1306 root->fs_info->delalloc_bytes) {
1307 printk(KERN_INFO "btrfs warning: delalloc account "
1308 "%llu %llu\n",
1309 (unsigned long long)
1310 state->end - state->start + 1,
1311 (unsigned long long)
1312 root->fs_info->delalloc_bytes);
1313 btrfs_delalloc_free_space(root, inode, (u64)-1);
1314 root->fs_info->delalloc_bytes = 0;
1315 BTRFS_I(inode)->delalloc_bytes = 0;
1316 } else {
1317 btrfs_delalloc_free_space(root, inode,
1318 state->end -
1319 state->start + 1);
1320 root->fs_info->delalloc_bytes -= state->end -
1321 state->start + 1;
1322 BTRFS_I(inode)->delalloc_bytes -= state->end -
1323 state->start + 1;
1324 }
1325 if (BTRFS_I(inode)->delalloc_bytes == 0 &&
1326 !list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
1327 list_del_init(&BTRFS_I(inode)->delalloc_inodes);
1328 }
1329 spin_unlock(&root->fs_info->delalloc_lock);
1330 }
1331 return 0;
1332 }
1333
1334 /*
1335 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
1336 * we don't create bios that span stripes or chunks
1337 */
1338 int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
1339 size_t size, struct bio *bio,
1340 unsigned long bio_flags)
1341 {
1342 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1343 struct btrfs_mapping_tree *map_tree;
1344 u64 logical = (u64)bio->bi_sector << 9;
1345 u64 length = 0;
1346 u64 map_length;
1347 int ret;
1348
1349 if (bio_flags & EXTENT_BIO_COMPRESSED)
1350 return 0;
1351
1352 length = bio->bi_size;
1353 map_tree = &root->fs_info->mapping_tree;
1354 map_length = length;
1355 ret = btrfs_map_block(map_tree, READ, logical,
1356 &map_length, NULL, 0);
1357
1358 if (map_length < length + size)
1359 return 1;
1360 return 0;
1361 }
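/*
 * btrfs_map_block() trims map_length down to the bytes from 'logical'
 * that stay within one stripe; returning 1 above tells the caller the
 * extra page would cross that boundary and a new bio must be started.
 */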
1362
1363 /*
1364 * in order to insert checksums into the metadata in large chunks,
1365 * we wait until bio submission time. All the pages in the bio are
1366 * checksummed and sums are attached onto the ordered extent record.
1367 *
1368 * At IO completion time the csums attached to the ordered extent record
1369 * are inserted into the btree
1370 */
1371 static int __btrfs_submit_bio_start(struct inode *inode, int rw,
1372 struct bio *bio, int mirror_num,
1373 unsigned long bio_flags)
1374 {
1375 struct btrfs_root *root = BTRFS_I(inode)->root;
1376 int ret = 0;
1377
1378 ret = btrfs_csum_one_bio(root, inode, bio, 0, 0);
1379 BUG_ON(ret);
1380 return 0;
1381 }
1382
1383 /*
1384 * in order to insert checksums into the metadata in large chunks,
1385 * we wait until bio submission time. All the pages in the bio are
1386 * checksummed and sums are attached onto the ordered extent record.
1387 *
1388 * At IO completion time the csums attached to the ordered extent record
1389 * are inserted into the btree
1390 */
1391 static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
1392 int mirror_num, unsigned long bio_flags)
1393 {
1394 struct btrfs_root *root = BTRFS_I(inode)->root;
1395 return btrfs_map_bio(root, rw, bio, mirror_num, 1);
1396 }
1397
1398 /*
1399 * extent_io.c submission hook. This does the right thing for csum calculation
1400 * on write, or reading the csums from the tree before a read
1401 */
1402 static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1403 int mirror_num, unsigned long bio_flags)
1404 {
1405 struct btrfs_root *root = BTRFS_I(inode)->root;
1406 int ret = 0;
1407 int skip_sum;
1408
1409 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
1410
1411 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
1412 BUG_ON(ret);
1413
1414 if (!(rw & (1 << BIO_RW))) {
1415 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1416 return btrfs_submit_compressed_read(inode, bio,
1417 mirror_num, bio_flags);
1418 } else if (!skip_sum)
1419 btrfs_lookup_bio_sums(root, inode, bio, NULL);
1420 goto mapit;
1421 } else if (!skip_sum) {
1422 /* csum items have already been cloned */
1423 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
1424 goto mapit;
1425 /* we're doing a write, do the async checksumming */
1426 return btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
1427 inode, rw, bio, mirror_num,
1428 bio_flags, __btrfs_submit_bio_start,
1429 __btrfs_submit_bio_done);
1430 }
1431
1432 mapit:
1433 return btrfs_map_bio(root, rw, bio, mirror_num, 0);
1434 }
1435
1436 /*
1437 * given a list of ordered sums record them in the inode. This happens
1438 * at IO completion time based on sums calculated at bio submission time.
1439 */
1440 static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1441 struct inode *inode, u64 file_offset,
1442 struct list_head *list)
1443 {
1444 struct btrfs_ordered_sum *sum;
1445
1446 btrfs_set_trans_block_group(trans, inode);
1447
1448 list_for_each_entry(sum, list, list) {
1449 btrfs_csum_file_blocks(trans,
1450 BTRFS_I(inode)->root->fs_info->csum_root, sum);
1451 }
1452 return 0;
1453 }
1454
1455 int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end)
1456 {
1457 if ((end & (PAGE_CACHE_SIZE - 1)) == 0)
1458 WARN_ON(1);
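/*
 * ranges here are inclusive, so a properly page-aligned range ends at
 * (page boundary - 1); an 'end' that is an exact multiple of the page
 * size indicates an off-by-one in the caller, hence the WARN_ON.
 */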
1459 return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
1460 GFP_NOFS);
1461 }
1462
1463 /* see btrfs_writepage_start_hook for details on why this is required */
1464 struct btrfs_writepage_fixup {
1465 struct page *page;
1466 struct btrfs_work work;
1467 };
1468
1469 static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
1470 {
1471 struct btrfs_writepage_fixup *fixup;
1472 struct btrfs_ordered_extent *ordered;
1473 struct page *page;
1474 struct inode *inode;
1475 u64 page_start;
1476 u64 page_end;
1477
1478 fixup = container_of(work, struct btrfs_writepage_fixup, work);
1479 page = fixup->page;
1480 again:
1481 lock_page(page);
1482 if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
1483 ClearPageChecked(page);
1484 goto out_page;
1485 }
1486
1487 inode = page->mapping->host;
1488 page_start = page_offset(page);
1489 page_end = page_offset(page) + PAGE_CACHE_SIZE - 1;
1490
1491 lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1492
1493 /* already ordered? We're done */
1494 if (PagePrivate2(page))
1495 goto out;
1496
1497 ordered = btrfs_lookup_ordered_extent(inode, page_start);
1498 if (ordered) {
1499 unlock_extent(&BTRFS_I(inode)->io_tree, page_start,
1500 page_end, GFP_NOFS);
1501 unlock_page(page);
1502 btrfs_start_ordered_extent(inode, ordered, 1);
1503 goto again;
1504 }
1505
1506 btrfs_set_extent_delalloc(inode, page_start, page_end);
1507 ClearPageChecked(page);
1508 out:
1509 unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end, GFP_NOFS);
1510 out_page:
1511 unlock_page(page);
1512 page_cache_release(page);
1513 }
1514
1515 /*
1516 * There are a few paths in the higher layers of the kernel that directly
1517 * set the page dirty bit without asking the filesystem if it is a
1518 * good idea. This causes problems because we want to make sure COW
1519 * properly happens and the data=ordered rules are followed.
1520 *
1521 * In our case any range that doesn't have the ORDERED bit set
1522 * hasn't been properly setup for IO. We kick off an async process
1523 * to fix it up. The async helper will wait for ordered extents, set
1524 * the delalloc bit and make it safe to write the page.
1525 */
1526 static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
1527 {
1528 struct inode *inode = page->mapping->host;
1529 struct btrfs_writepage_fixup *fixup;
1530 struct btrfs_root *root = BTRFS_I(inode)->root;
1531
1532 /* this page is properly in the ordered list */
1533 if (TestClearPagePrivate2(page))
1534 return 0;
1535
1536 if (PageChecked(page))
1537 return -EAGAIN;
1538
1539 fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
1540 if (!fixup)
1541 return -EAGAIN;
1542
1543 SetPageChecked(page);
1544 page_cache_get(page);
1545 fixup->work.func = btrfs_writepage_fixup_worker;
1546 fixup->page = page;
1547 btrfs_queue_worker(&root->fs_info->fixup_workers, &fixup->work);
1548 return -EAGAIN;
1549 }
1550
1551 static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
1552 struct inode *inode, u64 file_pos,
1553 u64 disk_bytenr, u64 disk_num_bytes,
1554 u64 num_bytes, u64 ram_bytes,
1555 u64 locked_end,
1556 u8 compression, u8 encryption,
1557 u16 other_encoding, int extent_type)
1558 {
1559 struct btrfs_root *root = BTRFS_I(inode)->root;
1560 struct btrfs_file_extent_item *fi;
1561 struct btrfs_path *path;
1562 struct extent_buffer *leaf;
1563 struct btrfs_key ins;
1564 u64 hint;
1565 int ret;
1566
1567 path = btrfs_alloc_path();
1568 BUG_ON(!path);
1569
1570 path->leave_spinning = 1;
1571
1572 /*
1573 * we may be replacing one extent in the tree with another.
1574 * The new extent is pinned in the extent map, and we don't want
1575 * to drop it from the cache until it is completely in the btree.
1576 *
1577 * So, tell btrfs_drop_extents to leave this extent in the cache.
1578 * the caller is expected to unpin it and allow it to be merged
1579 * with the others.
1580 */
1581 ret = btrfs_drop_extents(trans, root, inode, file_pos,
1582 file_pos + num_bytes, locked_end,
1583 file_pos, &hint, 0);
1584 BUG_ON(ret);
1585
1586 ins.objectid = inode->i_ino;
1587 ins.offset = file_pos;
1588 ins.type = BTRFS_EXTENT_DATA_KEY;
1589 ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi));
1590 BUG_ON(ret);
1591 leaf = path->nodes[0];
1592 fi = btrfs_item_ptr(leaf, path->slots[0],
1593 struct btrfs_file_extent_item);
1594 btrfs_set_file_extent_generation(leaf, fi, trans->transid);
1595 btrfs_set_file_extent_type(leaf, fi, extent_type);
1596 btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
1597 btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
1598 btrfs_set_file_extent_offset(leaf, fi, 0);
1599 btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
1600 btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
1601 btrfs_set_file_extent_compression(leaf, fi, compression);
1602 btrfs_set_file_extent_encryption(leaf, fi, encryption);
1603 btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);
1604
1605 btrfs_unlock_up_safe(path, 1);
1606 btrfs_set_lock_blocking(leaf);
1607
1608 btrfs_mark_buffer_dirty(leaf);
1609
1610 inode_add_bytes(inode, num_bytes);
1611
1612 ins.objectid = disk_bytenr;
1613 ins.offset = disk_num_bytes;
1614 ins.type = BTRFS_EXTENT_ITEM_KEY;
1615 ret = btrfs_alloc_reserved_file_extent(trans, root,
1616 root->root_key.objectid,
1617 inode->i_ino, file_pos, &ins);
1618 BUG_ON(ret);
1619 btrfs_free_path(path);
1620
1621 return 0;
1622 }
1623
1624 /*
1625 * helper function for btrfs_finish_ordered_io, this
1626 * just reads in some of the csum leaves to prime them into ram
1627 * before we start the transaction. It limits the amount of btree
1628 * reads required while inside the transaction.
1629 */
1630 static noinline void reada_csum(struct btrfs_root *root,
1631 struct btrfs_path *path,
1632 struct btrfs_ordered_extent *ordered_extent)
1633 {
1634 struct btrfs_ordered_sum *sum;
1635 u64 bytenr;
1636
1637 sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
1638 list);
1639 bytenr = sum->sums[0].bytenr;
1640
1641 /*
1642 * we don't care about the results, the point of this search is
1643 * just to get the btree leaves into ram
1644 */
1645 btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
1646 }
1647
1648 /* as ordered data IO finishes, this gets called so we can finish
1649 * an ordered extent if the range of bytes in the file it covers are
1650 * fully written.
1651 */
1652 static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1653 {
1654 struct btrfs_root *root = BTRFS_I(inode)->root;
1655 struct btrfs_trans_handle *trans;
1656 struct btrfs_ordered_extent *ordered_extent = NULL;
1657 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1658 struct btrfs_path *path;
1659 int compressed = 0;
1660 int ret;
1661
1662 ret = btrfs_dec_test_ordered_pending(inode, start, end - start + 1);
1663 if (!ret)
1664 return 0;
1665
1666 /*
1667 * before we join the transaction, try to do some of our IO.
1668 * This will limit the amount of IO that we have to do with
1669 * the transaction running. We're unlikely to need to do any
1670 * IO if the file extents are new; the disk_i_size check
1671 * covers the most common case.
1672 */
1673 if (start < BTRFS_I(inode)->disk_i_size) {
1674 path = btrfs_alloc_path();
1675 if (path) {
1676 ret = btrfs_lookup_file_extent(NULL, root, path,
1677 inode->i_ino,
1678 start, 0);
1679 ordered_extent = btrfs_lookup_ordered_extent(inode,
1680 start);
1681 if (!list_empty(&ordered_extent->list)) {
1682 btrfs_release_path(root, path);
1683 reada_csum(root, path, ordered_extent);
1684 }
1685 btrfs_free_path(path);
1686 }
1687 }
1688
1689 trans = btrfs_join_transaction(root, 1);
1690
1691 if (!ordered_extent)
1692 ordered_extent = btrfs_lookup_ordered_extent(inode, start);
1693 BUG_ON(!ordered_extent);
1694 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags))
1695 goto nocow;
1696
1697 lock_extent(io_tree, ordered_extent->file_offset,
1698 ordered_extent->file_offset + ordered_extent->len - 1,
1699 GFP_NOFS);
1700
1701 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
1702 compressed = 1;
1703 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
1704 BUG_ON(compressed);
1705 ret = btrfs_mark_extent_written(trans, root, inode,
1706 ordered_extent->file_offset,
1707 ordered_extent->file_offset +
1708 ordered_extent->len);
1709 BUG_ON(ret);
1710 } else {
1711 ret = insert_reserved_file_extent(trans, inode,
1712 ordered_extent->file_offset,
1713 ordered_extent->start,
1714 ordered_extent->disk_len,
1715 ordered_extent->len,
1716 ordered_extent->len,
1717 ordered_extent->file_offset +
1718 ordered_extent->len,
1719 compressed, 0, 0,
1720 BTRFS_FILE_EXTENT_REG);
1721 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
1722 ordered_extent->file_offset,
1723 ordered_extent->len);
1724 BUG_ON(ret);
1725 }
1726 unlock_extent(io_tree, ordered_extent->file_offset,
1727 ordered_extent->file_offset + ordered_extent->len - 1,
1728 GFP_NOFS);
1729 nocow:
1730 add_pending_csums(trans, inode, ordered_extent->file_offset,
1731 &ordered_extent->list);
1732
1733 mutex_lock(&BTRFS_I(inode)->extent_mutex);
1734 btrfs_ordered_update_i_size(inode, ordered_extent);
1735 btrfs_update_inode(trans, root, inode);
1736 btrfs_remove_ordered_extent(inode, ordered_extent);
1737 mutex_unlock(&BTRFS_I(inode)->extent_mutex);
1738
1739 /* once for us */
1740 btrfs_put_ordered_extent(ordered_extent);
1741 /* once for the tree */
1742 btrfs_put_ordered_extent(ordered_extent);
1743
1744 btrfs_end_transaction(trans, root);
1745 return 0;
1746 }
1747
1748 static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1749 struct extent_state *state, int uptodate)
1750 {
1751 ClearPagePrivate2(page);
1752 return btrfs_finish_ordered_io(page->mapping->host, start, end);
1753 }
1754
1755 /*
1756 * When IO fails, either with EIO or csum verification fails, we
1757 * try other mirrors that might have a good copy of the data. This
1758 * io_failure_record is used to record state as we go through all the
1759 * mirrors. If another mirror has good data, the page is set up to date
1760 * and things continue. If a good mirror can't be found, the original
1761 * bio end_io callback is called to indicate things have failed.
1762 */
1763 struct io_failure_record {
1764 struct page *page;
1765 u64 start;
1766 u64 len;
1767 u64 logical;
1768 unsigned long bio_flags;
1769 int last_mirror;
1770 };
1771
1772 static int btrfs_io_failed_hook(struct bio *failed_bio,
1773 struct page *page, u64 start, u64 end,
1774 struct extent_state *state)
1775 {
1776 struct io_failure_record *failrec = NULL;
1777 u64 private;
1778 struct extent_map *em;
1779 struct inode *inode = page->mapping->host;
1780 struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree;
1781 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
1782 struct bio *bio;
1783 int num_copies;
1784 int ret;
1785 int rw;
1786 u64 logical;
1787
1788 ret = get_state_private(failure_tree, start, &private);
1789 if (ret) {
1790 failrec = kmalloc(sizeof(*failrec), GFP_NOFS);
1791 if (!failrec)
1792 return -ENOMEM;
1793 failrec->start = start;
1794 failrec->len = end - start + 1;
1795 failrec->last_mirror = 0;
1796 failrec->bio_flags = 0;
1797
1798 read_lock(&em_tree->lock);
1799 em = lookup_extent_mapping(em_tree, start, failrec->len);
1800 if (em && (em->start > start || em->start + em->len < start)) {
1801 free_extent_map(em);
1802 em = NULL;
1803 }
1804 read_unlock(&em_tree->lock);
1805
1806 if (!em || IS_ERR(em)) {
1807 kfree(failrec);
1808 return -EIO;
1809 }
1810 logical = start - em->start;
1811 logical = em->block_start + logical;
1812 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
1813 logical = em->block_start;
1814 failrec->bio_flags = EXTENT_BIO_COMPRESSED;
1815 }
1816 failrec->logical = logical;
1817 free_extent_map(em);
1818 set_extent_bits(failure_tree, start, end, EXTENT_LOCKED |
1819 EXTENT_DIRTY, GFP_NOFS);
1820 set_state_private(failure_tree, start,
1821 (u64)(unsigned long)failrec);
1822 } else {
1823 failrec = (struct io_failure_record *)(unsigned long)private;
1824 }
1825 num_copies = btrfs_num_copies(
1826 &BTRFS_I(inode)->root->fs_info->mapping_tree,
1827 failrec->logical, failrec->len);
1828 failrec->last_mirror++;
1829 if (!state) {
1830 spin_lock(&BTRFS_I(inode)->io_tree.lock);
1831 state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree,
1832 failrec->start,
1833 EXTENT_LOCKED);
1834 if (state && state->start != failrec->start)
1835 state = NULL;
1836 spin_unlock(&BTRFS_I(inode)->io_tree.lock);
1837 }
1838 if (!state || failrec->last_mirror > num_copies) {
1839 set_state_private(failure_tree, failrec->start, 0);
1840 clear_extent_bits(failure_tree, failrec->start,
1841 failrec->start + failrec->len - 1,
1842 EXTENT_LOCKED | EXTENT_DIRTY, GFP_NOFS);
1843 kfree(failrec);
1844 return -EIO;
1845 }
1846 bio = bio_alloc(GFP_NOFS, 1);
1847 bio->bi_private = state;
1848 bio->bi_end_io = failed_bio->bi_end_io;
1849 bio->bi_sector = failrec->logical >> 9;
1850 bio->bi_bdev = failed_bio->bi_bdev;
1851 bio->bi_size = 0;
1852
1853 bio_add_page(bio, page, failrec->len, start - page_offset(page));
1854 if (failed_bio->bi_rw & (1 << BIO_RW))
1855 rw = WRITE;
1856 else
1857 rw = READ;
1858
1859 BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
1860 failrec->last_mirror,
1861 failrec->bio_flags);
1862 return 0;
1863 }
1864
1865 /*
1866 * each time an IO finishes, we do a fast check in the IO failure tree
1867 * to see if we need to process or clean up an io_failure_record
1868 */
1869 static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1870 {
1871 u64 private;
1872 u64 private_failure;
1873 struct io_failure_record *failure;
1874 int ret;
1875
1876 private = 0;
1877 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1878 (u64)-1, 1, EXTENT_DIRTY)) {
1879 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1880 start, &private_failure);
1881 if (ret == 0) {
1882 failure = (struct io_failure_record *)(unsigned long)
1883 private_failure;
1884 set_state_private(&BTRFS_I(inode)->io_failure_tree,
1885 failure->start, 0);
1886 clear_extent_bits(&BTRFS_I(inode)->io_failure_tree,
1887 failure->start,
1888 failure->start + failure->len - 1,
1889 EXTENT_DIRTY | EXTENT_LOCKED,
1890 GFP_NOFS);
1891 kfree(failure);
1892 }
1893 }
1894 return 0;
1895 }
1896
1897 /*
1898  * when reads are done, we need to check csums to verify the data is correct.
1899  * If there's a match, we allow the bio to finish. If not, we go through
1900 * the io_failure_record routines to find good copies
1901 */
1902 static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1903 struct extent_state *state)
1904 {
1905 size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT);
1906 struct inode *inode = page->mapping->host;
1907 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1908 char *kaddr;
1909 u64 private = ~(u32)0;
1910 int ret;
1911 struct btrfs_root *root = BTRFS_I(inode)->root;
1912 u32 csum = ~(u32)0;
1913
1914 if (PageChecked(page)) {
1915 ClearPageChecked(page);
1916 goto good;
1917 }
1918
1919 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1920 return 0;
1921
1922 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1923 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
1924 clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM,
1925 GFP_NOFS);
1926 return 0;
1927 }
1928
1929 if (state && state->start == start) {
1930 private = state->private;
1931 ret = 0;
1932 } else {
1933 ret = get_state_private(io_tree, start, &private);
1934 }
1935 kaddr = kmap_atomic(page, KM_USER0);
1936 if (ret)
1937 goto zeroit;
1938
1939 csum = btrfs_csum_data(root, kaddr + offset, csum, end - start + 1);
1940 btrfs_csum_final(csum, (char *)&csum);
1941 if (csum != private)
1942 goto zeroit;
1943
1944 kunmap_atomic(kaddr, KM_USER0);
1945 good:
1946 /* if the io failure tree for this inode is non-empty,
1947 * check to see if we've recovered from a failed IO
1948 */
1949 btrfs_clean_io_failures(inode, start);
1950 return 0;
1951
1952 zeroit:
1953 if (printk_ratelimit()) {
1954 printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u "
1955 "private %llu\n", page->mapping->host->i_ino,
1956 (unsigned long long)start, csum,
1957 (unsigned long long)private);
1958 }
1959 memset(kaddr + offset, 1, end - start + 1);
1960 flush_dcache_page(page);
1961 kunmap_atomic(kaddr, KM_USER0);
1962 if (private == 0)
1963 return 0;
1964 return -EIO;
1965 }
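
/*
 * A minimal sketch of the csum check done above, assuming one crc32c-style
 * csum per block stored in state->private; kmap, offsets and the mirror
 * retry path are elided, and "expected" stands in for the stored value:
 *
 *	u32 csum = ~(u32)0;
 *	csum = btrfs_csum_data(root, kaddr + offset, csum, len);
 *	btrfs_csum_final(csum, (char *)&csum);
 *	if (csum != expected)
 *		goto zeroit;	/* bad copy, try another mirror */
 */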
1966
1967 /*
1968 * This creates an orphan entry for the given inode in case something goes
1969 * wrong in the middle of an unlink/truncate.
1970 */
1971 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
1972 {
1973 struct btrfs_root *root = BTRFS_I(inode)->root;
1974 int ret = 0;
1975
1976 spin_lock(&root->list_lock);
1977
1978 /* already on the orphan list, we're good */
1979 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
1980 spin_unlock(&root->list_lock);
1981 return 0;
1982 }
1983
1984 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
1985
1986 spin_unlock(&root->list_lock);
1987
1988 /*
1989 * insert an orphan item to track this unlinked/truncated file
1990 */
1991 ret = btrfs_insert_orphan_item(trans, root, inode->i_ino);
1992
1993 return ret;
1994 }
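
/*
 * Hedged usage sketch: a typical caller adds the orphan item inside the
 * same transaction that drops the last link, so a crash between the two
 * steps still leaves a record for btrfs_orphan_cleanup(). This mirrors
 * btrfs_unlink() later in this file; error handling is elided.
 *
 *	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
 *	if (inode->i_nlink == 0)
 *		ret = btrfs_orphan_add(trans, inode);
 */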
1995
1996 /*
1997 * We have done the truncate/delete so we can go ahead and remove the orphan
1998 * item for this particular inode.
1999 */
2000 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2001 {
2002 struct btrfs_root *root = BTRFS_I(inode)->root;
2003 int ret = 0;
2004
2005 spin_lock(&root->list_lock);
2006
2007 if (list_empty(&BTRFS_I(inode)->i_orphan)) {
2008 spin_unlock(&root->list_lock);
2009 return 0;
2010 }
2011
2012 list_del_init(&BTRFS_I(inode)->i_orphan);
2013 if (!trans) {
2014 spin_unlock(&root->list_lock);
2015 return 0;
2016 }
2017
2018 spin_unlock(&root->list_lock);
2019
2020 ret = btrfs_del_orphan_item(trans, root, inode->i_ino);
2021
2022 return ret;
2023 }
2024
2025 /*
2026 * this cleans up any orphans that may be left on the list from the last use
2027 * of this root.
2028 */
2029 void btrfs_orphan_cleanup(struct btrfs_root *root)
2030 {
2031 struct btrfs_path *path;
2032 struct extent_buffer *leaf;
2033 struct btrfs_item *item;
2034 struct btrfs_key key, found_key;
2035 struct btrfs_trans_handle *trans;
2036 struct inode *inode;
2037 int ret = 0, nr_unlink = 0, nr_truncate = 0;
2038
2039 path = btrfs_alloc_path();
2040 if (!path)
2041 return;
2042 path->reada = -1;
2043
2044 key.objectid = BTRFS_ORPHAN_OBJECTID;
2045 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
2046 key.offset = (u64)-1;
2047
2048
2049 while (1) {
2050 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2051 if (ret < 0) {
2052 			printk(KERN_ERR "Error searching slot for orphan: %d\n",
2053 			       ret);
2054 break;
2055 }
2056
2057 /*
2058 		 * ret == 0 means we found what we were searching for, which
2059 		 * is weird, but possible, so only touch the path if we didn't
2060 		 * find the key, and see if we have stuff that matches
2061 */
2062 if (ret > 0) {
2063 if (path->slots[0] == 0)
2064 break;
2065 path->slots[0]--;
2066 }
2067
2068 /* pull out the item */
2069 leaf = path->nodes[0];
2070 item = btrfs_item_nr(leaf, path->slots[0]);
2071 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2072
2073 /* make sure the item matches what we want */
2074 if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
2075 break;
2076 if (btrfs_key_type(&found_key) != BTRFS_ORPHAN_ITEM_KEY)
2077 break;
2078
2079 /* release the path since we're done with it */
2080 btrfs_release_path(root, path);
2081
2082 /*
2083 * this is where we are basically btrfs_lookup, without the
2084 * crossing root thing. we store the inode number in the
2085 * offset of the orphan item.
2086 */
2087 found_key.objectid = found_key.offset;
2088 found_key.type = BTRFS_INODE_ITEM_KEY;
2089 found_key.offset = 0;
2090 inode = btrfs_iget(root->fs_info->sb, &found_key, root);
2091 if (IS_ERR(inode))
2092 break;
2093
2094 /*
2095 * add this inode to the orphan list so btrfs_orphan_del does
2096 * the proper thing when we hit it
2097 */
2098 spin_lock(&root->list_lock);
2099 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list);
2100 spin_unlock(&root->list_lock);
2101
2102 /*
2103 		 * if this is a bad inode, it means we actually succeeded in
2104 * removing the inode, but not the orphan record, which means
2105 * we need to manually delete the orphan since iput will just
2106 * do a destroy_inode
2107 */
2108 if (is_bad_inode(inode)) {
2109 trans = btrfs_start_transaction(root, 1);
2110 btrfs_orphan_del(trans, inode);
2111 btrfs_end_transaction(trans, root);
2112 iput(inode);
2113 continue;
2114 }
2115
2116 		/* if we have links, this was a truncate, let's do that */
2117 if (inode->i_nlink) {
2118 nr_truncate++;
2119 btrfs_truncate(inode);
2120 } else {
2121 nr_unlink++;
2122 }
2123
2124 /* this will do delete_inode and everything for us */
2125 iput(inode);
2126 }
2127
2128 if (nr_unlink)
2129 printk(KERN_INFO "btrfs: unlinked %d orphans\n", nr_unlink);
2130 if (nr_truncate)
2131 printk(KERN_INFO "btrfs: truncated %d orphans\n", nr_truncate);
2132
2133 btrfs_free_path(path);
2134 }
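
/*
 * For reference, the orphan item key layout the loop above searches for
 * (the inode number lives in the offset field, as noted above); this is
 * a descriptive sketch of the existing format, not anything new:
 *
 *	key.objectid = BTRFS_ORPHAN_OBJECTID;
 *	key.type     = BTRFS_ORPHAN_ITEM_KEY;
 *	key.offset   = inode->i_ino;
 */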
2135
2136 /*
2137 * very simple check to peek ahead in the leaf looking for xattrs. If we
2138 * don't find any xattrs, we know there can't be any acls.
2139 *
2140 * slot is the slot the inode is in, objectid is the objectid of the inode
2141 */
2142 static noinline int acls_after_inode_item(struct extent_buffer *leaf,
2143 int slot, u64 objectid)
2144 {
2145 u32 nritems = btrfs_header_nritems(leaf);
2146 struct btrfs_key found_key;
2147 int scanned = 0;
2148
2149 slot++;
2150 while (slot < nritems) {
2151 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2152
2153 /* we found a different objectid, there must not be acls */
2154 if (found_key.objectid != objectid)
2155 return 0;
2156
2157 /* we found an xattr, assume we've got an acl */
2158 if (found_key.type == BTRFS_XATTR_ITEM_KEY)
2159 return 1;
2160
2161 /*
2162 * we found a key greater than an xattr key, there can't
2163 * be any acls later on
2164 */
2165 if (found_key.type > BTRFS_XATTR_ITEM_KEY)
2166 return 0;
2167
2168 slot++;
2169 scanned++;
2170
2171 /*
2172 * it goes inode, inode backrefs, xattrs, extents,
2173 * so if there are a ton of hard links to an inode there can
2174 * be a lot of backrefs. Don't waste time searching too hard,
2175 * this is just an optimization
2176 */
2177 if (scanned >= 8)
2178 break;
2179 }
2180 /* we hit the end of the leaf before we found an xattr or
2181 * something larger than an xattr. We have to assume the inode
2182 * has acls
2183 */
2184 return 1;
2185 }
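
/*
 * Sketch of the item ordering the scan above relies on, for a single
 * objectid within a leaf (key types sort ascending):
 *
 *	INODE_ITEM, INODE_REF..., XATTR_ITEM..., EXTENT_DATA...
 *
 * so once we see a type greater than BTRFS_XATTR_ITEM_KEY we can stop.
 */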
2186
2187 /*
2188 * read an inode from the btree into the in-memory inode
2189 */
2190 static void btrfs_read_locked_inode(struct inode *inode)
2191 {
2192 struct btrfs_path *path;
2193 struct extent_buffer *leaf;
2194 struct btrfs_inode_item *inode_item;
2195 struct btrfs_timespec *tspec;
2196 struct btrfs_root *root = BTRFS_I(inode)->root;
2197 struct btrfs_key location;
2198 int maybe_acls;
2199 u64 alloc_group_block;
2200 u32 rdev;
2201 int ret;
2202
2203 path = btrfs_alloc_path();
2204 BUG_ON(!path);
2205 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2206
2207 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
2208 if (ret)
2209 goto make_bad;
2210
2211 leaf = path->nodes[0];
2212 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2213 struct btrfs_inode_item);
2214
2215 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2216 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
2217 inode->i_uid = btrfs_inode_uid(leaf, inode_item);
2218 inode->i_gid = btrfs_inode_gid(leaf, inode_item);
2219 btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
2220
2221 tspec = btrfs_inode_atime(inode_item);
2222 inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2223 inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2224
2225 tspec = btrfs_inode_mtime(inode_item);
2226 inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2227 inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2228
2229 tspec = btrfs_inode_ctime(inode_item);
2230 inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
2231 inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
2232
2233 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2234 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2235 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item);
2236 inode->i_generation = BTRFS_I(inode)->generation;
2237 inode->i_rdev = 0;
2238 rdev = btrfs_inode_rdev(leaf, inode_item);
2239
2240 BTRFS_I(inode)->index_cnt = (u64)-1;
2241 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2242
2243 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2244
2245 /*
2246 * try to precache a NULL acl entry for files that don't have
2247 * any xattrs or acls
2248 */
2249 maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino);
2250 if (!maybe_acls)
2251 cache_no_acl(inode);
2252
2253 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0,
2254 alloc_group_block, 0);
2255 btrfs_free_path(path);
2256 inode_item = NULL;
2257
2258 switch (inode->i_mode & S_IFMT) {
2259 case S_IFREG:
2260 inode->i_mapping->a_ops = &btrfs_aops;
2261 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2262 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
2263 inode->i_fop = &btrfs_file_operations;
2264 inode->i_op = &btrfs_file_inode_operations;
2265 break;
2266 case S_IFDIR:
2267 inode->i_fop = &btrfs_dir_file_operations;
2268 if (root == root->fs_info->tree_root)
2269 inode->i_op = &btrfs_dir_ro_inode_operations;
2270 else
2271 inode->i_op = &btrfs_dir_inode_operations;
2272 break;
2273 case S_IFLNK:
2274 inode->i_op = &btrfs_symlink_inode_operations;
2275 inode->i_mapping->a_ops = &btrfs_symlink_aops;
2276 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
2277 break;
2278 default:
2279 inode->i_op = &btrfs_special_inode_operations;
2280 init_special_inode(inode, inode->i_mode, rdev);
2281 break;
2282 }
2283
2284 btrfs_update_iflags(inode);
2285 return;
2286
2287 make_bad:
2288 btrfs_free_path(path);
2289 make_bad_inode(inode);
2290 }
2291
2292 /*
2293 * given a leaf and an inode, copy the inode fields into the leaf
2294 */
2295 static void fill_inode_item(struct btrfs_trans_handle *trans,
2296 struct extent_buffer *leaf,
2297 struct btrfs_inode_item *item,
2298 struct inode *inode)
2299 {
2300 btrfs_set_inode_uid(leaf, item, inode->i_uid);
2301 btrfs_set_inode_gid(leaf, item, inode->i_gid);
2302 btrfs_set_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size);
2303 btrfs_set_inode_mode(leaf, item, inode->i_mode);
2304 btrfs_set_inode_nlink(leaf, item, inode->i_nlink);
2305
2306 btrfs_set_timespec_sec(leaf, btrfs_inode_atime(item),
2307 inode->i_atime.tv_sec);
2308 btrfs_set_timespec_nsec(leaf, btrfs_inode_atime(item),
2309 inode->i_atime.tv_nsec);
2310
2311 btrfs_set_timespec_sec(leaf, btrfs_inode_mtime(item),
2312 inode->i_mtime.tv_sec);
2313 btrfs_set_timespec_nsec(leaf, btrfs_inode_mtime(item),
2314 inode->i_mtime.tv_nsec);
2315
2316 btrfs_set_timespec_sec(leaf, btrfs_inode_ctime(item),
2317 inode->i_ctime.tv_sec);
2318 btrfs_set_timespec_nsec(leaf, btrfs_inode_ctime(item),
2319 inode->i_ctime.tv_nsec);
2320
2321 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2322 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2323 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence);
2324 btrfs_set_inode_transid(leaf, item, trans->transid);
2325 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2326 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2327 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group);
2328 }
2329
2330 /*
2331 * copy everything in the in-memory inode into the btree.
2332 */
2333 noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
2334 struct btrfs_root *root, struct inode *inode)
2335 {
2336 struct btrfs_inode_item *inode_item;
2337 struct btrfs_path *path;
2338 struct extent_buffer *leaf;
2339 int ret;
2340
2341 path = btrfs_alloc_path();
2342 BUG_ON(!path);
2343 path->leave_spinning = 1;
2344 ret = btrfs_lookup_inode(trans, root, path,
2345 &BTRFS_I(inode)->location, 1);
2346 if (ret) {
2347 if (ret > 0)
2348 ret = -ENOENT;
2349 goto failed;
2350 }
2351
2352 btrfs_unlock_up_safe(path, 1);
2353 leaf = path->nodes[0];
2354 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2355 struct btrfs_inode_item);
2356
2357 fill_inode_item(trans, leaf, inode_item, inode);
2358 btrfs_mark_buffer_dirty(leaf);
2359 btrfs_set_inode_last_trans(trans, inode);
2360 ret = 0;
2361 failed:
2362 btrfs_free_path(path);
2363 return ret;
2364 }
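
/*
 * Hedged usage sketch: btrfs_update_inode must run inside a transaction,
 * and callers throughout this file follow the same pattern (compare
 * btrfs_dirty_inode later on); error handling is elided.
 *
 *	trans = btrfs_join_transaction(root, 1);
 *	btrfs_set_trans_block_group(trans, inode);
 *	btrfs_update_inode(trans, root, inode);
 *	btrfs_end_transaction(trans, root);
 */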
2365
2366
2367 /*
2368 * unlink helper that gets used here in inode.c and in the tree logging
2369  * recovery code. It removes a link in a directory with a given name, and
2370  * also drops the back refs from the inode to the directory
2371 */
2372 int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
2373 struct btrfs_root *root,
2374 struct inode *dir, struct inode *inode,
2375 const char *name, int name_len)
2376 {
2377 struct btrfs_path *path;
2378 int ret = 0;
2379 struct extent_buffer *leaf;
2380 struct btrfs_dir_item *di;
2381 struct btrfs_key key;
2382 u64 index;
2383
2384 path = btrfs_alloc_path();
2385 if (!path) {
2386 ret = -ENOMEM;
2387 goto err;
2388 }
2389
2390 path->leave_spinning = 1;
2391 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2392 name, name_len, -1);
2393 if (IS_ERR(di)) {
2394 ret = PTR_ERR(di);
2395 goto err;
2396 }
2397 if (!di) {
2398 ret = -ENOENT;
2399 goto err;
2400 }
2401 leaf = path->nodes[0];
2402 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2403 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2404 if (ret)
2405 goto err;
2406 btrfs_release_path(root, path);
2407
2408 ret = btrfs_del_inode_ref(trans, root, name, name_len,
2409 inode->i_ino,
2410 dir->i_ino, &index);
2411 if (ret) {
2412 printk(KERN_INFO "btrfs failed to delete reference to %.*s, "
2413 "inode %lu parent %lu\n", name_len, name,
2414 inode->i_ino, dir->i_ino);
2415 goto err;
2416 }
2417
2418 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2419 index, name, name_len, -1);
2420 if (IS_ERR(di)) {
2421 ret = PTR_ERR(di);
2422 goto err;
2423 }
2424 if (!di) {
2425 ret = -ENOENT;
2426 goto err;
2427 }
2428 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2429 btrfs_release_path(root, path);
2430
2431 ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
2432 inode, dir->i_ino);
2433 BUG_ON(ret != 0 && ret != -ENOENT);
2434
2435 ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len,
2436 dir, index);
2437 BUG_ON(ret);
2438 err:
2439 btrfs_free_path(path);
2440 if (ret)
2441 goto out;
2442
2443 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2444 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2445 btrfs_update_inode(trans, root, dir);
2446 btrfs_drop_nlink(inode);
2447 ret = btrfs_update_inode(trans, root, inode);
2448 out:
2449 return ret;
2450 }
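
/*
 * Note on the size accounting above: directory i_size counts each name
 * twice, because every entry is stored both as a dir item (hashed by
 * name) and as a dir index item (ordered by sequence number), hence the
 * "name_len * 2" when an entry is removed.
 */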
2451
2452 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
2453 {
2454 struct btrfs_root *root;
2455 struct btrfs_trans_handle *trans;
2456 struct inode *inode = dentry->d_inode;
2457 int ret;
2458 unsigned long nr = 0;
2459
2460 root = BTRFS_I(dir)->root;
2461
2462 trans = btrfs_start_transaction(root, 1);
2463
2464 btrfs_set_trans_block_group(trans, dir);
2465
2466 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
2467
2468 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2469 dentry->d_name.name, dentry->d_name.len);
2470
2471 if (inode->i_nlink == 0)
2472 ret = btrfs_orphan_add(trans, inode);
2473
2474 nr = trans->blocks_used;
2475
2476 btrfs_end_transaction_throttle(trans, root);
2477 btrfs_btree_balance_dirty(root, nr);
2478 return ret;
2479 }
2480
2481 int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
2482 struct btrfs_root *root,
2483 struct inode *dir, u64 objectid,
2484 const char *name, int name_len)
2485 {
2486 struct btrfs_path *path;
2487 struct extent_buffer *leaf;
2488 struct btrfs_dir_item *di;
2489 struct btrfs_key key;
2490 u64 index;
2491 int ret;
2492
2493 path = btrfs_alloc_path();
2494 if (!path)
2495 return -ENOMEM;
2496
2497 di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
2498 name, name_len, -1);
2499 BUG_ON(!di || IS_ERR(di));
2500
2501 leaf = path->nodes[0];
2502 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2503 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2504 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2505 BUG_ON(ret);
2506 btrfs_release_path(root, path);
2507
2508 ret = btrfs_del_root_ref(trans, root->fs_info->tree_root,
2509 objectid, root->root_key.objectid,
2510 dir->i_ino, &index, name, name_len);
2511 if (ret < 0) {
2512 BUG_ON(ret != -ENOENT);
2513 di = btrfs_search_dir_index_item(root, path, dir->i_ino,
2514 name, name_len);
2515 BUG_ON(!di || IS_ERR(di));
2516
2517 leaf = path->nodes[0];
2518 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2519 btrfs_release_path(root, path);
2520 index = key.offset;
2521 }
2522
2523 di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
2524 index, name, name_len, -1);
2525 BUG_ON(!di || IS_ERR(di));
2526
2527 leaf = path->nodes[0];
2528 btrfs_dir_item_key_to_cpu(leaf, di, &key);
2529 WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
2530 ret = btrfs_delete_one_dir_name(trans, root, path, di);
2531 BUG_ON(ret);
2532 btrfs_release_path(root, path);
2533
2534 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2535 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2536 ret = btrfs_update_inode(trans, root, dir);
2537 BUG_ON(ret);
2538 dir->i_sb->s_dirt = 1;
2539
2540 btrfs_free_path(path);
2541 return 0;
2542 }
2543
2544 static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
2545 {
2546 struct inode *inode = dentry->d_inode;
2547 int err = 0;
2548 int ret;
2549 struct btrfs_root *root = BTRFS_I(dir)->root;
2550 struct btrfs_trans_handle *trans;
2551 unsigned long nr = 0;
2552
2553 if (inode->i_size > BTRFS_EMPTY_DIR_SIZE ||
2554 inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
2555 return -ENOTEMPTY;
2556
2557 trans = btrfs_start_transaction(root, 1);
2558 btrfs_set_trans_block_group(trans, dir);
2559
2560 if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
2561 err = btrfs_unlink_subvol(trans, root, dir,
2562 BTRFS_I(inode)->location.objectid,
2563 dentry->d_name.name,
2564 dentry->d_name.len);
2565 goto out;
2566 }
2567
2568 err = btrfs_orphan_add(trans, inode);
2569 if (err)
2570 goto out;
2571
2572 /* now the directory is empty */
2573 err = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
2574 dentry->d_name.name, dentry->d_name.len);
2575 if (!err)
2576 btrfs_i_size_write(inode, 0);
2577 out:
2578 nr = trans->blocks_used;
2579 ret = btrfs_end_transaction_throttle(trans, root);
2580 btrfs_btree_balance_dirty(root, nr);
2581
2582 if (ret && !err)
2583 err = ret;
2584 return err;
2585 }
2586
2587 #if 0
2588 /*
2589 * when truncating bytes in a file, it is possible to avoid reading
2590 * the leaves that contain only checksum items. This can be the
2591 * majority of the IO required to delete a large file, but it must
2592 * be done carefully.
2593 *
2594 * The keys in the level just above the leaves are checked to make sure
2595 * the lowest key in a given leaf is a csum key, and starts at an offset
2596 * after the new size.
2597 *
2598 * Then the key for the next leaf is checked to make sure it also has
2599 * a checksum item for the same file. If it does, we know our target leaf
2600 * contains only checksum items, and it can be safely freed without reading
2601 * it.
2602 *
2603 * This is just an optimization targeted at large files. It may do
2604 * nothing. It will return 0 unless things went badly.
2605 */
2606 static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans,
2607 struct btrfs_root *root,
2608 struct btrfs_path *path,
2609 struct inode *inode, u64 new_size)
2610 {
2611 struct btrfs_key key;
2612 int ret;
2613 int nritems;
2614 struct btrfs_key found_key;
2615 struct btrfs_key other_key;
2616 struct btrfs_leaf_ref *ref;
2617 u64 leaf_gen;
2618 u64 leaf_start;
2619
2620 path->lowest_level = 1;
2621 key.objectid = inode->i_ino;
2622 key.type = BTRFS_CSUM_ITEM_KEY;
2623 key.offset = new_size;
2624 again:
2625 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2626 if (ret < 0)
2627 goto out;
2628
2629 if (path->nodes[1] == NULL) {
2630 ret = 0;
2631 goto out;
2632 }
2633 ret = 0;
2634 btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]);
2635 nritems = btrfs_header_nritems(path->nodes[1]);
2636
2637 if (!nritems)
2638 goto out;
2639
2640 if (path->slots[1] >= nritems)
2641 goto next_node;
2642
2643 /* did we find a key greater than anything we want to delete? */
2644 if (found_key.objectid > inode->i_ino ||
2645 (found_key.objectid == inode->i_ino && found_key.type > key.type))
2646 goto out;
2647
2648 	/* we check the next key in the node to make sure the leaf contains
2649 * only checksum items. This comparison doesn't work if our
2650 * leaf is the last one in the node
2651 */
2652 if (path->slots[1] + 1 >= nritems) {
2653 next_node:
2654 /* search forward from the last key in the node, this
2655 * will bring us into the next node in the tree
2656 */
2657 btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1);
2658
2659 /* unlikely, but we inc below, so check to be safe */
2660 if (found_key.offset == (u64)-1)
2661 goto out;
2662
2663 /* search_forward needs a path with locks held, do the
2664 * search again for the original key. It is possible
2665 * this will race with a balance and return a path that
2666 * we could modify, but this drop is just an optimization
2667 * and is allowed to miss some leaves.
2668 */
2669 btrfs_release_path(root, path);
2670 found_key.offset++;
2671
2672 /* setup a max key for search_forward */
2673 other_key.offset = (u64)-1;
2674 other_key.type = key.type;
2675 other_key.objectid = key.objectid;
2676
2677 path->keep_locks = 1;
2678 ret = btrfs_search_forward(root, &found_key, &other_key,
2679 path, 0, 0);
2680 path->keep_locks = 0;
2681 if (ret || found_key.objectid != key.objectid ||
2682 found_key.type != key.type) {
2683 ret = 0;
2684 goto out;
2685 }
2686
2687 key.offset = found_key.offset;
2688 btrfs_release_path(root, path);
2689 cond_resched();
2690 goto again;
2691 }
2692
2693 /* we know there's one more slot after us in the tree,
2694 * read that key so we can verify it is also a checksum item
2695 */
2696 btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1);
2697
2698 if (found_key.objectid < inode->i_ino)
2699 goto next_key;
2700
2701 if (found_key.type != key.type || found_key.offset < new_size)
2702 goto next_key;
2703
2704 /*
2705 * if the key for the next leaf isn't a csum key from this objectid,
2706 * we can't be sure there aren't good items inside this leaf.
2707 * Bail out
2708 */
2709 if (other_key.objectid != inode->i_ino || other_key.type != key.type)
2710 goto out;
2711
2712 leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]);
2713 leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]);
2714 /*
2715 * it is safe to delete this leaf, it contains only
2716 * csum items from this inode at an offset >= new_size
2717 */
2718 ret = btrfs_del_leaf(trans, root, path, leaf_start);
2719 BUG_ON(ret);
2720
2721 if (root->ref_cows && leaf_gen < trans->transid) {
2722 ref = btrfs_alloc_leaf_ref(root, 0);
2723 if (ref) {
2724 ref->root_gen = root->root_key.offset;
2725 ref->bytenr = leaf_start;
2726 ref->owner = 0;
2727 ref->generation = leaf_gen;
2728 ref->nritems = 0;
2729
2730 btrfs_sort_leaf_ref(ref);
2731
2732 ret = btrfs_add_leaf_ref(root, ref, 0);
2733 WARN_ON(ret);
2734 btrfs_free_leaf_ref(root, ref);
2735 } else {
2736 WARN_ON(1);
2737 }
2738 }
2739 next_key:
2740 btrfs_release_path(root, path);
2741
2742 if (other_key.objectid == inode->i_ino &&
2743 other_key.type == key.type && other_key.offset > key.offset) {
2744 key.offset = other_key.offset;
2745 cond_resched();
2746 goto again;
2747 }
2748 ret = 0;
2749 out:
2750 /* fixup any changes we've made to the path */
2751 path->lowest_level = 0;
2752 path->keep_locks = 0;
2753 btrfs_release_path(root, path);
2754 return ret;
2755 }
2756
2757 #endif
2758
2759 /*
2760 * this can truncate away extent items, csum items and directory items.
2761 * It starts at a high offset and removes keys until it can't find
2762 * any higher than new_size
2763 *
2764 * csum items that cross the new i_size are truncated to the new size
2765 * as well.
2766 *
2767 * min_type is the minimum key type to truncate down to. If set to 0, this
2768 * will kill all the items on this inode, including the INODE_ITEM_KEY.
2769 */
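/*
 * Hedged examples of min_type, based on the callers in this file: inode
 * deletion passes 0 to remove everything (see btrfs_delete_inode below),
 * while a plain truncate would stop at the file extents, e.g.:
 *
 *	btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
 *	btrfs_truncate_inode_items(trans, root, inode, new_size,
 *				   BTRFS_EXTENT_DATA_KEY);
 */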
2770 noinline int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
2771 struct btrfs_root *root,
2772 struct inode *inode,
2773 u64 new_size, u32 min_type)
2774 {
2775 int ret;
2776 struct btrfs_path *path;
2777 struct btrfs_key key;
2778 struct btrfs_key found_key;
2779 u32 found_type = (u8)-1;
2780 struct extent_buffer *leaf;
2781 struct btrfs_file_extent_item *fi;
2782 u64 extent_start = 0;
2783 u64 extent_num_bytes = 0;
2784 u64 extent_offset = 0;
2785 u64 item_end = 0;
2786 int found_extent;
2787 int del_item;
2788 int pending_del_nr = 0;
2789 int pending_del_slot = 0;
2790 int extent_type = -1;
2791 int encoding;
2792 u64 mask = root->sectorsize - 1;
2793
2794 if (root->ref_cows)
2795 btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0);
2796 path = btrfs_alloc_path();
2797 BUG_ON(!path);
2798 path->reada = -1;
2799
2800 /* FIXME, add redo link to tree so we don't leak on crash */
2801 key.objectid = inode->i_ino;
2802 key.offset = (u64)-1;
2803 key.type = (u8)-1;
2804
2805 search_again:
2806 path->leave_spinning = 1;
2807 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2808 if (ret < 0)
2809 goto error;
2810
2811 if (ret > 0) {
2812 /* there are no items in the tree for us to truncate, we're
2813 * done
2814 */
2815 if (path->slots[0] == 0) {
2816 ret = 0;
2817 goto error;
2818 }
2819 path->slots[0]--;
2820 }
2821
2822 while (1) {
2823 fi = NULL;
2824 leaf = path->nodes[0];
2825 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
2826 found_type = btrfs_key_type(&found_key);
2827 encoding = 0;
2828
2829 if (found_key.objectid != inode->i_ino)
2830 break;
2831
2832 if (found_type < min_type)
2833 break;
2834
2835 item_end = found_key.offset;
2836 if (found_type == BTRFS_EXTENT_DATA_KEY) {
2837 fi = btrfs_item_ptr(leaf, path->slots[0],
2838 struct btrfs_file_extent_item);
2839 extent_type = btrfs_file_extent_type(leaf, fi);
2840 encoding = btrfs_file_extent_compression(leaf, fi);
2841 encoding |= btrfs_file_extent_encryption(leaf, fi);
2842 encoding |= btrfs_file_extent_other_encoding(leaf, fi);
2843
2844 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2845 item_end +=
2846 btrfs_file_extent_num_bytes(leaf, fi);
2847 			} else { /* BTRFS_FILE_EXTENT_INLINE */
2848 item_end += btrfs_file_extent_inline_len(leaf,
2849 fi);
2850 }
2851 item_end--;
2852 }
2853 if (item_end < new_size) {
2854 if (found_type == BTRFS_DIR_ITEM_KEY)
2855 found_type = BTRFS_INODE_ITEM_KEY;
2856 else if (found_type == BTRFS_EXTENT_ITEM_KEY)
2857 found_type = BTRFS_EXTENT_DATA_KEY;
2858 else if (found_type == BTRFS_EXTENT_DATA_KEY)
2859 found_type = BTRFS_XATTR_ITEM_KEY;
2860 else if (found_type == BTRFS_XATTR_ITEM_KEY)
2861 found_type = BTRFS_INODE_REF_KEY;
2862 else if (found_type)
2863 found_type--;
2864 else
2865 break;
2866 btrfs_set_key_type(&key, found_type);
2867 goto next;
2868 }
2869 if (found_key.offset >= new_size)
2870 del_item = 1;
2871 else
2872 del_item = 0;
2873 found_extent = 0;
2874
2875 /* FIXME, shrink the extent if the ref count is only 1 */
2876 if (found_type != BTRFS_EXTENT_DATA_KEY)
2877 goto delete;
2878
2879 if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
2880 u64 num_dec;
2881 extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
2882 if (!del_item && !encoding) {
2883 u64 orig_num_bytes =
2884 btrfs_file_extent_num_bytes(leaf, fi);
2885 extent_num_bytes = new_size -
2886 found_key.offset + root->sectorsize - 1;
2887 extent_num_bytes = extent_num_bytes &
2888 ~((u64)root->sectorsize - 1);
2889 btrfs_set_file_extent_num_bytes(leaf, fi,
2890 extent_num_bytes);
2891 num_dec = (orig_num_bytes -
2892 extent_num_bytes);
2893 if (root->ref_cows && extent_start != 0)
2894 inode_sub_bytes(inode, num_dec);
2895 btrfs_mark_buffer_dirty(leaf);
2896 } else {
2897 extent_num_bytes =
2898 btrfs_file_extent_disk_num_bytes(leaf,
2899 fi);
2900 extent_offset = found_key.offset -
2901 btrfs_file_extent_offset(leaf, fi);
2902
2903 /* FIXME blocksize != 4096 */
2904 num_dec = btrfs_file_extent_num_bytes(leaf, fi);
2905 if (extent_start != 0) {
2906 found_extent = 1;
2907 if (root->ref_cows)
2908 inode_sub_bytes(inode, num_dec);
2909 }
2910 }
2911 		} else { /* BTRFS_FILE_EXTENT_INLINE */
2912 /*
2913 * we can't truncate inline items that have had
2914 * special encodings
2915 */
2916 if (!del_item &&
2917 btrfs_file_extent_compression(leaf, fi) == 0 &&
2918 btrfs_file_extent_encryption(leaf, fi) == 0 &&
2919 btrfs_file_extent_other_encoding(leaf, fi) == 0) {
2920 u32 size = new_size - found_key.offset;
2921
2922 if (root->ref_cows) {
2923 inode_sub_bytes(inode, item_end + 1 -
2924 new_size);
2925 }
2926 size =
2927 btrfs_file_extent_calc_inline_size(size);
2928 ret = btrfs_truncate_item(trans, root, path,
2929 size, 1);
2930 BUG_ON(ret);
2931 } else if (root->ref_cows) {
2932 inode_sub_bytes(inode, item_end + 1 -
2933 found_key.offset);
2934 }
2935 }
2936 delete:
2937 if (del_item) {
2938 if (!pending_del_nr) {
2939 /* no pending yet, add ourselves */
2940 pending_del_slot = path->slots[0];
2941 pending_del_nr = 1;
2942 } else if (pending_del_nr &&
2943 path->slots[0] + 1 == pending_del_slot) {
2944 /* hop on the pending chunk */
2945 pending_del_nr++;
2946 pending_del_slot = path->slots[0];
2947 } else {
2948 BUG();
2949 }
2950 } else {
2951 break;
2952 }
2953 if (found_extent && root->ref_cows) {
2954 btrfs_set_path_blocking(path);
2955 ret = btrfs_free_extent(trans, root, extent_start,
2956 extent_num_bytes, 0,
2957 btrfs_header_owner(leaf),
2958 inode->i_ino, extent_offset);
2959 BUG_ON(ret);
2960 }
2961 next:
2962 if (path->slots[0] == 0) {
2963 if (pending_del_nr)
2964 goto del_pending;
2965 btrfs_release_path(root, path);
2966 if (found_type == BTRFS_INODE_ITEM_KEY)
2967 break;
2968 goto search_again;
2969 }
2970
2971 path->slots[0]--;
2972 if (pending_del_nr &&
2973 path->slots[0] + 1 != pending_del_slot) {
2974 struct btrfs_key debug;
2975 del_pending:
2976 btrfs_item_key_to_cpu(path->nodes[0], &debug,
2977 pending_del_slot);
2978 ret = btrfs_del_items(trans, root, path,
2979 pending_del_slot,
2980 pending_del_nr);
2981 BUG_ON(ret);
2982 pending_del_nr = 0;
2983 btrfs_release_path(root, path);
2984 if (found_type == BTRFS_INODE_ITEM_KEY)
2985 break;
2986 goto search_again;
2987 }
2988 }
2989 ret = 0;
2990 error:
2991 if (pending_del_nr) {
2992 ret = btrfs_del_items(trans, root, path, pending_del_slot,
2993 pending_del_nr);
2994 }
2995 btrfs_free_path(path);
2996 return ret;
2997 }
2998
2999 /*
3000 * taken from block_truncate_page, but does cow as it zeros out
3001 * any bytes left in the last page in the file.
3002 */
3003 static int btrfs_truncate_page(struct address_space *mapping, loff_t from)
3004 {
3005 struct inode *inode = mapping->host;
3006 struct btrfs_root *root = BTRFS_I(inode)->root;
3007 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3008 struct btrfs_ordered_extent *ordered;
3009 char *kaddr;
3010 u32 blocksize = root->sectorsize;
3011 pgoff_t index = from >> PAGE_CACHE_SHIFT;
3012 unsigned offset = from & (PAGE_CACHE_SIZE-1);
3013 struct page *page;
3014 int ret = 0;
3015 u64 page_start;
3016 u64 page_end;
3017
3018 if ((offset & (blocksize - 1)) == 0)
3019 goto out;
3020
3021 ret = -ENOMEM;
3022 again:
3023 page = grab_cache_page(mapping, index);
3024 if (!page)
3025 goto out;
3026
3027 page_start = page_offset(page);
3028 page_end = page_start + PAGE_CACHE_SIZE - 1;
3029
3030 if (!PageUptodate(page)) {
3031 ret = btrfs_readpage(NULL, page);
3032 lock_page(page);
3033 if (page->mapping != mapping) {
3034 unlock_page(page);
3035 page_cache_release(page);
3036 goto again;
3037 }
3038 if (!PageUptodate(page)) {
3039 ret = -EIO;
3040 goto out_unlock;
3041 }
3042 }
3043 wait_on_page_writeback(page);
3044
3045 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
3046 set_page_extent_mapped(page);
3047
3048 ordered = btrfs_lookup_ordered_extent(inode, page_start);
3049 if (ordered) {
3050 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3051 unlock_page(page);
3052 page_cache_release(page);
3053 btrfs_start_ordered_extent(inode, ordered, 1);
3054 btrfs_put_ordered_extent(ordered);
3055 goto again;
3056 }
3057
3058 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
3059 if (ret) {
3060 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3061 goto out_unlock;
3062 }
3063
3064 ret = 0;
3065 if (offset != PAGE_CACHE_SIZE) {
3066 kaddr = kmap(page);
3067 memset(kaddr + offset, 0, PAGE_CACHE_SIZE - offset);
3068 flush_dcache_page(page);
3069 kunmap(page);
3070 }
3071 ClearPageChecked(page);
3072 set_page_dirty(page);
3073 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
3074
3075 out_unlock:
3076 unlock_page(page);
3077 page_cache_release(page);
3078 out:
3079 return ret;
3080 }
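
/*
 * A worked example of the alignment math above, assuming 4K pages and a
 * 4K sectorsize: truncating to from = 0x1a000 + 100 gives
 *
 *	index  = from >> PAGE_CACHE_SHIFT     = 0x1a  (page 26)
 *	offset = from & (PAGE_CACHE_SIZE - 1) = 100
 *
 * so bytes [100, 4096) of that page are zeroed; a block-aligned "from"
 * (offset & (blocksize - 1) == 0) skips the page entirely.
 */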
3081
3082 int btrfs_cont_expand(struct inode *inode, loff_t size)
3083 {
3084 struct btrfs_trans_handle *trans;
3085 struct btrfs_root *root = BTRFS_I(inode)->root;
3086 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
3087 struct extent_map *em;
3088 u64 mask = root->sectorsize - 1;
3089 u64 hole_start = (inode->i_size + mask) & ~mask;
3090 u64 block_end = (size + mask) & ~mask;
3091 u64 last_byte;
3092 u64 cur_offset;
3093 u64 hole_size;
3094 int err = 0;
3095
3096 if (size <= hole_start)
3097 return 0;
3098
3099 btrfs_truncate_page(inode->i_mapping, inode->i_size);
3100
3101 while (1) {
3102 struct btrfs_ordered_extent *ordered;
3103 btrfs_wait_ordered_range(inode, hole_start,
3104 block_end - hole_start);
3105 lock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3106 ordered = btrfs_lookup_ordered_extent(inode, hole_start);
3107 if (!ordered)
3108 break;
3109 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3110 btrfs_put_ordered_extent(ordered);
3111 }
3112
3113 trans = btrfs_start_transaction(root, 1);
3114 btrfs_set_trans_block_group(trans, inode);
3115
3116 cur_offset = hole_start;
3117 while (1) {
3118 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
3119 block_end - cur_offset, 0);
3120 BUG_ON(IS_ERR(em) || !em);
3121 last_byte = min(extent_map_end(em), block_end);
3122 last_byte = (last_byte + mask) & ~mask;
3123 if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
3124 u64 hint_byte = 0;
3125 hole_size = last_byte - cur_offset;
3126 err = btrfs_drop_extents(trans, root, inode,
3127 cur_offset,
3128 cur_offset + hole_size,
3129 block_end,
3130 cur_offset, &hint_byte, 1);
3131 if (err)
3132 break;
3133
3134 err = btrfs_reserve_metadata_space(root, 1);
3135 if (err)
3136 break;
3137
3138 err = btrfs_insert_file_extent(trans, root,
3139 inode->i_ino, cur_offset, 0,
3140 0, hole_size, 0, hole_size,
3141 0, 0, 0);
3142 btrfs_drop_extent_cache(inode, hole_start,
3143 last_byte - 1, 0);
3144 btrfs_unreserve_metadata_space(root, 1);
3145 }
3146 free_extent_map(em);
3147 cur_offset = last_byte;
3148 if (err || cur_offset >= block_end)
3149 break;
3150 }
3151
3152 btrfs_end_transaction(trans, root);
3153 unlock_extent(io_tree, hole_start, block_end - 1, GFP_NOFS);
3154 return err;
3155 }
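
/*
 * Note on the hole insertion above: a file extent item whose disk_bytenr
 * and disk_num_bytes are zero represents a hole, which is why
 * btrfs_insert_file_extent is called with zeros for the disk fields and
 * only hole_size for the logical length.
 */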
3156
3157 static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3158 {
3159 struct inode *inode = dentry->d_inode;
3160 int err;
3161
3162 err = inode_change_ok(inode, attr);
3163 if (err)
3164 return err;
3165
3166 if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
3167 if (attr->ia_size > inode->i_size) {
3168 err = btrfs_cont_expand(inode, attr->ia_size);
3169 if (err)
3170 return err;
3171 } else if (inode->i_size > 0 &&
3172 attr->ia_size == 0) {
3173
3174 /* we're truncating a file that used to have good
3175 * data down to zero. Make sure it gets into
3176 * the ordered flush list so that any new writes
3177 * get down to disk quickly.
3178 */
3179 BTRFS_I(inode)->ordered_data_close = 1;
3180 }
3181 }
3182
3183 err = inode_setattr(inode, attr);
3184
3185 if (!err && ((attr->ia_valid & ATTR_MODE)))
3186 err = btrfs_acl_chmod(inode);
3187 return err;
3188 }
3189
3190 void btrfs_delete_inode(struct inode *inode)
3191 {
3192 struct btrfs_trans_handle *trans;
3193 struct btrfs_root *root = BTRFS_I(inode)->root;
3194 unsigned long nr;
3195 int ret;
3196
3197 truncate_inode_pages(&inode->i_data, 0);
3198 if (is_bad_inode(inode)) {
3199 btrfs_orphan_del(NULL, inode);
3200 goto no_delete;
3201 }
3202 btrfs_wait_ordered_range(inode, 0, (u64)-1);
3203
3204 if (inode->i_nlink > 0) {
3205 BUG_ON(btrfs_root_refs(&root->root_item) != 0);
3206 goto no_delete;
3207 }
3208
3209 btrfs_i_size_write(inode, 0);
3210 trans = btrfs_join_transaction(root, 1);
3211
3212 btrfs_set_trans_block_group(trans, inode);
3213 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size, 0);
3214 if (ret) {
3215 btrfs_orphan_del(NULL, inode);
3216 goto no_delete_lock;
3217 }
3218
3219 btrfs_orphan_del(trans, inode);
3220
3221 nr = trans->blocks_used;
3222 clear_inode(inode);
3223
3224 btrfs_end_transaction(trans, root);
3225 btrfs_btree_balance_dirty(root, nr);
3226 return;
3227
3228 no_delete_lock:
3229 nr = trans->blocks_used;
3230 btrfs_end_transaction(trans, root);
3231 btrfs_btree_balance_dirty(root, nr);
3232 no_delete:
3233 clear_inode(inode);
3234 }
3235
3236 /*
3237 * this returns the key found in the dir entry in the location pointer.
3238 * If no dir entries were found, location->objectid is 0.
3239 */
3240 static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3241 struct btrfs_key *location)
3242 {
3243 const char *name = dentry->d_name.name;
3244 int namelen = dentry->d_name.len;
3245 struct btrfs_dir_item *di;
3246 struct btrfs_path *path;
3247 struct btrfs_root *root = BTRFS_I(dir)->root;
3248 int ret = 0;
3249
3250 path = btrfs_alloc_path();
3251 BUG_ON(!path);
3252
3253 di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name,
3254 namelen, 0);
3255 if (IS_ERR(di))
3256 ret = PTR_ERR(di);
3257
3258 if (!di || IS_ERR(di))
3259 goto out_err;
3260
3261 btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3262 out:
3263 btrfs_free_path(path);
3264 return ret;
3265 out_err:
3266 location->objectid = 0;
3267 goto out;
3268 }
3269
3270 /*
3271 * when we hit a tree root in a directory, the btrfs part of the inode
3272 * needs to be changed to reflect the root directory of the tree root. This
3273 * is kind of like crossing a mount point.
3274 */
3275 static int fixup_tree_root_location(struct btrfs_root *root,
3276 struct inode *dir,
3277 struct dentry *dentry,
3278 struct btrfs_key *location,
3279 struct btrfs_root **sub_root)
3280 {
3281 struct btrfs_path *path;
3282 struct btrfs_root *new_root;
3283 struct btrfs_root_ref *ref;
3284 struct extent_buffer *leaf;
3285 int ret;
3286 int err = 0;
3287
3288 path = btrfs_alloc_path();
3289 if (!path) {
3290 err = -ENOMEM;
3291 goto out;
3292 }
3293
3294 err = -ENOENT;
3295 ret = btrfs_find_root_ref(root->fs_info->tree_root, path,
3296 BTRFS_I(dir)->root->root_key.objectid,
3297 location->objectid);
3298 if (ret) {
3299 if (ret < 0)
3300 err = ret;
3301 goto out;
3302 }
3303
3304 leaf = path->nodes[0];
3305 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
3306 if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino ||
3307 btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
3308 goto out;
3309
3310 ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
3311 (unsigned long)(ref + 1),
3312 dentry->d_name.len);
3313 if (ret)
3314 goto out;
3315
3316 btrfs_release_path(root->fs_info->tree_root, path);
3317
3318 new_root = btrfs_read_fs_root_no_name(root->fs_info, location);
3319 if (IS_ERR(new_root)) {
3320 err = PTR_ERR(new_root);
3321 goto out;
3322 }
3323
3324 if (btrfs_root_refs(&new_root->root_item) == 0) {
3325 err = -ENOENT;
3326 goto out;
3327 }
3328
3329 *sub_root = new_root;
3330 location->objectid = btrfs_root_dirid(&new_root->root_item);
3331 location->type = BTRFS_INODE_ITEM_KEY;
3332 location->offset = 0;
3333 err = 0;
3334 out:
3335 btrfs_free_path(path);
3336 return err;
3337 }
3338
3339 static void inode_tree_add(struct inode *inode)
3340 {
3341 struct btrfs_root *root = BTRFS_I(inode)->root;
3342 struct btrfs_inode *entry;
3343 struct rb_node **p;
3344 struct rb_node *parent;
3345 again:
3346 p = &root->inode_tree.rb_node;
3347 parent = NULL;
3348
3349 if (hlist_unhashed(&inode->i_hash))
3350 return;
3351
3352 spin_lock(&root->inode_lock);
3353 while (*p) {
3354 parent = *p;
3355 entry = rb_entry(parent, struct btrfs_inode, rb_node);
3356
3357 if (inode->i_ino < entry->vfs_inode.i_ino)
3358 p = &parent->rb_left;
3359 else if (inode->i_ino > entry->vfs_inode.i_ino)
3360 p = &parent->rb_right;
3361 else {
3362 WARN_ON(!(entry->vfs_inode.i_state &
3363 (I_WILL_FREE | I_FREEING | I_CLEAR)));
3364 rb_erase(parent, &root->inode_tree);
3365 RB_CLEAR_NODE(parent);
3366 spin_unlock(&root->inode_lock);
3367 goto again;
3368 }
3369 }
3370 rb_link_node(&BTRFS_I(inode)->rb_node, parent, p);
3371 rb_insert_color(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3372 spin_unlock(&root->inode_lock);
3373 }
3374
3375 static void inode_tree_del(struct inode *inode)
3376 {
3377 struct btrfs_root *root = BTRFS_I(inode)->root;
3378 int empty = 0;
3379
3380 spin_lock(&root->inode_lock);
3381 if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
3382 rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
3383 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3384 empty = RB_EMPTY_ROOT(&root->inode_tree);
3385 }
3386 spin_unlock(&root->inode_lock);
3387
3388 if (empty && btrfs_root_refs(&root->root_item) == 0) {
3389 synchronize_srcu(&root->fs_info->subvol_srcu);
3390 spin_lock(&root->inode_lock);
3391 empty = RB_EMPTY_ROOT(&root->inode_tree);
3392 spin_unlock(&root->inode_lock);
3393 if (empty)
3394 btrfs_add_dead_root(root);
3395 }
3396 }
3397
3398 int btrfs_invalidate_inodes(struct btrfs_root *root)
3399 {
3400 struct rb_node *node;
3401 struct rb_node *prev;
3402 struct btrfs_inode *entry;
3403 struct inode *inode;
3404 u64 objectid = 0;
3405
3406 WARN_ON(btrfs_root_refs(&root->root_item) != 0);
3407
3408 spin_lock(&root->inode_lock);
3409 again:
3410 node = root->inode_tree.rb_node;
3411 prev = NULL;
3412 while (node) {
3413 prev = node;
3414 entry = rb_entry(node, struct btrfs_inode, rb_node);
3415
3416 if (objectid < entry->vfs_inode.i_ino)
3417 node = node->rb_left;
3418 else if (objectid > entry->vfs_inode.i_ino)
3419 node = node->rb_right;
3420 else
3421 break;
3422 }
3423 if (!node) {
3424 while (prev) {
3425 entry = rb_entry(prev, struct btrfs_inode, rb_node);
3426 if (objectid <= entry->vfs_inode.i_ino) {
3427 node = prev;
3428 break;
3429 }
3430 prev = rb_next(prev);
3431 }
3432 }
3433 while (node) {
3434 entry = rb_entry(node, struct btrfs_inode, rb_node);
3435 objectid = entry->vfs_inode.i_ino + 1;
3436 inode = igrab(&entry->vfs_inode);
3437 if (inode) {
3438 spin_unlock(&root->inode_lock);
3439 if (atomic_read(&inode->i_count) > 1)
3440 d_prune_aliases(inode);
3441 /*
3442 * btrfs_drop_inode will remove it from
3443 * the inode cache when its usage count
3444 * hits zero.
3445 */
3446 iput(inode);
3447 cond_resched();
3448 spin_lock(&root->inode_lock);
3449 goto again;
3450 }
3451
3452 if (cond_resched_lock(&root->inode_lock))
3453 goto again;
3454
3455 node = rb_next(node);
3456 }
3457 spin_unlock(&root->inode_lock);
3458 return 0;
3459 }
3460
3461 static noinline void init_btrfs_i(struct inode *inode)
3462 {
3463 struct btrfs_inode *bi = BTRFS_I(inode);
3464
3465 bi->generation = 0;
3466 bi->sequence = 0;
3467 bi->last_trans = 0;
3468 bi->logged_trans = 0;
3469 bi->delalloc_bytes = 0;
3470 bi->reserved_bytes = 0;
3471 bi->disk_i_size = 0;
3472 bi->flags = 0;
3473 bi->index_cnt = (u64)-1;
3474 bi->last_unlink_trans = 0;
3475 bi->ordered_data_close = 0;
3476 extent_map_tree_init(&BTRFS_I(inode)->extent_tree, GFP_NOFS);
3477 extent_io_tree_init(&BTRFS_I(inode)->io_tree,
3478 inode->i_mapping, GFP_NOFS);
3479 extent_io_tree_init(&BTRFS_I(inode)->io_failure_tree,
3480 inode->i_mapping, GFP_NOFS);
3481 INIT_LIST_HEAD(&BTRFS_I(inode)->delalloc_inodes);
3482 INIT_LIST_HEAD(&BTRFS_I(inode)->ordered_operations);
3483 RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
3484 btrfs_ordered_inode_tree_init(&BTRFS_I(inode)->ordered_tree);
3485 mutex_init(&BTRFS_I(inode)->extent_mutex);
3486 mutex_init(&BTRFS_I(inode)->log_mutex);
3487 }
3488
3489 static int btrfs_init_locked_inode(struct inode *inode, void *p)
3490 {
3491 struct btrfs_iget_args *args = p;
3492 inode->i_ino = args->ino;
3493 init_btrfs_i(inode);
3494 BTRFS_I(inode)->root = args->root;
3495 btrfs_set_inode_space_info(args->root, inode);
3496 return 0;
3497 }
3498
3499 static int btrfs_find_actor(struct inode *inode, void *opaque)
3500 {
3501 struct btrfs_iget_args *args = opaque;
3502 return args->ino == inode->i_ino &&
3503 args->root == BTRFS_I(inode)->root;
3504 }
3505
3506 static struct inode *btrfs_iget_locked(struct super_block *s,
3507 u64 objectid,
3508 struct btrfs_root *root)
3509 {
3510 struct inode *inode;
3511 struct btrfs_iget_args args;
3512 args.ino = objectid;
3513 args.root = root;
3514
3515 inode = iget5_locked(s, objectid, btrfs_find_actor,
3516 btrfs_init_locked_inode,
3517 (void *)&args);
3518 return inode;
3519 }
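
/*
 * Hedged sketch of the iget5_locked contract used above (names are
 * illustrative): the find actor decides cache hits, the init callback
 * seeds a freshly allocated inode, and I_NEW tells the caller to finish
 * reading it, as btrfs_iget below does.
 *
 *	inode = btrfs_iget_locked(sb, objectid, root);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... read the inode from the btree ...
 *		unlock_new_inode(inode);
 *	}
 */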
3520
3521 /* Get an inode object given its location and corresponding root.
3522  * The inode is read from disk if it is not already in the inode cache.
3523  */
3524 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
3525 struct btrfs_root *root)
3526 {
3527 struct inode *inode;
3528
3529 inode = btrfs_iget_locked(s, location->objectid, root);
3530 if (!inode)
3531 return ERR_PTR(-ENOMEM);
3532
3533 if (inode->i_state & I_NEW) {
3534 BTRFS_I(inode)->root = root;
3535 memcpy(&BTRFS_I(inode)->location, location, sizeof(*location));
3536 btrfs_read_locked_inode(inode);
3537
3538 inode_tree_add(inode);
3539 unlock_new_inode(inode);
3540 }
3541
3542 return inode;
3543 }
3544
3545 static struct inode *new_simple_dir(struct super_block *s,
3546 struct btrfs_key *key,
3547 struct btrfs_root *root)
3548 {
3549 struct inode *inode = new_inode(s);
3550
3551 if (!inode)
3552 return ERR_PTR(-ENOMEM);
3553
3554 init_btrfs_i(inode);
3555
3556 BTRFS_I(inode)->root = root;
3557 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
3558 BTRFS_I(inode)->dummy_inode = 1;
3559
3560 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
3561 inode->i_op = &simple_dir_inode_operations;
3562 inode->i_fop = &simple_dir_operations;
3563 inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
3564 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3565
3566 return inode;
3567 }
3568
3569 struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3570 {
3571 struct inode *inode;
3572 struct btrfs_root *root = BTRFS_I(dir)->root;
3573 struct btrfs_root *sub_root = root;
3574 struct btrfs_key location;
3575 int index;
3576 int ret;
3577
3578 dentry->d_op = &btrfs_dentry_operations;
3579
3580 if (dentry->d_name.len > BTRFS_NAME_LEN)
3581 return ERR_PTR(-ENAMETOOLONG);
3582
3583 ret = btrfs_inode_by_name(dir, dentry, &location);
3584
3585 if (ret < 0)
3586 return ERR_PTR(ret);
3587
3588 if (location.objectid == 0)
3589 return NULL;
3590
3591 if (location.type == BTRFS_INODE_ITEM_KEY) {
3592 inode = btrfs_iget(dir->i_sb, &location, root);
3593 return inode;
3594 }
3595
3596 BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3597
3598 index = srcu_read_lock(&root->fs_info->subvol_srcu);
3599 ret = fixup_tree_root_location(root, dir, dentry,
3600 &location, &sub_root);
3601 if (ret < 0) {
3602 if (ret != -ENOENT)
3603 inode = ERR_PTR(ret);
3604 else
3605 inode = new_simple_dir(dir->i_sb, &location, sub_root);
3606 } else {
3607 inode = btrfs_iget(dir->i_sb, &location, sub_root);
3608 }
3609 srcu_read_unlock(&root->fs_info->subvol_srcu, index);
3610
3611 return inode;
3612 }
3613
3614 static int btrfs_dentry_delete(struct dentry *dentry)
3615 {
3616 struct btrfs_root *root;
3617
3618 if (!dentry->d_inode)
3619 return 0;
3620
3621 root = BTRFS_I(dentry->d_inode)->root;
3622 if (btrfs_root_refs(&root->root_item) == 0)
3623 return 1;
3624 return 0;
3625 }
3626
3627 static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
3628 struct nameidata *nd)
3629 {
3630 struct inode *inode;
3631
3632 inode = btrfs_lookup_dentry(dir, dentry);
3633 if (IS_ERR(inode))
3634 return ERR_CAST(inode);
3635
3636 return d_splice_alias(inode, dentry);
3637 }
3638
3639 static unsigned char btrfs_filetype_table[] = {
3640 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
3641 };
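
/*
 * The table above is indexed by the BTRFS_FT_* value stored in the dir
 * item, so e.g. BTRFS_FT_REG_FILE maps to DT_REG and BTRFS_FT_SYMLINK to
 * DT_LNK; the readdir loop below uses it to fill in d_type.
 */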
3642
3643 static int btrfs_real_readdir(struct file *filp, void *dirent,
3644 filldir_t filldir)
3645 {
3646 struct inode *inode = filp->f_dentry->d_inode;
3647 struct btrfs_root *root = BTRFS_I(inode)->root;
3648 struct btrfs_item *item;
3649 struct btrfs_dir_item *di;
3650 struct btrfs_key key;
3651 struct btrfs_key found_key;
3652 struct btrfs_path *path;
3653 int ret;
3654 u32 nritems;
3655 struct extent_buffer *leaf;
3656 int slot;
3657 int advance;
3658 unsigned char d_type;
3659 int over = 0;
3660 u32 di_cur;
3661 u32 di_total;
3662 u32 di_len;
3663 int key_type = BTRFS_DIR_INDEX_KEY;
3664 char tmp_name[32];
3665 char *name_ptr;
3666 int name_len;
3667
3668 /* FIXME, use a real flag for deciding about the key type */
3669 if (root->fs_info->tree_root == root)
3670 key_type = BTRFS_DIR_ITEM_KEY;
3671
3672 /* special case for "." */
3673 if (filp->f_pos == 0) {
3674 over = filldir(dirent, ".", 1,
3675 1, inode->i_ino,
3676 DT_DIR);
3677 if (over)
3678 return 0;
3679 filp->f_pos = 1;
3680 }
3681 /* special case for .., just use the back ref */
3682 if (filp->f_pos == 1) {
3683 u64 pino = parent_ino(filp->f_path.dentry);
3684 over = filldir(dirent, "..", 2,
3685 2, pino, DT_DIR);
3686 if (over)
3687 return 0;
3688 filp->f_pos = 2;
3689 }
3690 path = btrfs_alloc_path();
3691 path->reada = 2;
3692
3693 btrfs_set_key_type(&key, key_type);
3694 key.offset = filp->f_pos;
3695 key.objectid = inode->i_ino;
3696
3697 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3698 if (ret < 0)
3699 goto err;
3700 advance = 0;
3701
3702 while (1) {
3703 leaf = path->nodes[0];
3704 nritems = btrfs_header_nritems(leaf);
3705 slot = path->slots[0];
3706 if (advance || slot >= nritems) {
3707 if (slot >= nritems - 1) {
3708 ret = btrfs_next_leaf(root, path);
3709 if (ret)
3710 break;
3711 leaf = path->nodes[0];
3712 nritems = btrfs_header_nritems(leaf);
3713 slot = path->slots[0];
3714 } else {
3715 slot++;
3716 path->slots[0]++;
3717 }
3718 }
3719
3720 advance = 1;
3721 item = btrfs_item_nr(leaf, slot);
3722 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3723
3724 if (found_key.objectid != key.objectid)
3725 break;
3726 if (btrfs_key_type(&found_key) != key_type)
3727 break;
3728 if (found_key.offset < filp->f_pos)
3729 continue;
3730
3731 filp->f_pos = found_key.offset;
3732
3733 di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
3734 di_cur = 0;
3735 di_total = btrfs_item_size(leaf, item);
3736
3737 while (di_cur < di_total) {
3738 struct btrfs_key location;
3739
3740 name_len = btrfs_dir_name_len(leaf, di);
3741 if (name_len <= sizeof(tmp_name)) {
3742 name_ptr = tmp_name;
3743 } else {
3744 name_ptr = kmalloc(name_len, GFP_NOFS);
3745 if (!name_ptr) {
3746 ret = -ENOMEM;
3747 goto err;
3748 }
3749 }
3750 read_extent_buffer(leaf, name_ptr,
3751 (unsigned long)(di + 1), name_len);
3752
3753 d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
3754 btrfs_dir_item_key_to_cpu(leaf, di, &location);
3755
3756 /* is this a reference to our own snapshot? If so
3757 * skip it
3758 */
3759 if (location.type == BTRFS_ROOT_ITEM_KEY &&
3760 location.objectid == root->root_key.objectid) {
3761 over = 0;
3762 goto skip;
3763 }
3764 over = filldir(dirent, name_ptr, name_len,
3765 found_key.offset, location.objectid,
3766 d_type);
3767
3768 skip:
3769 if (name_ptr != tmp_name)
3770 kfree(name_ptr);
3771
3772 if (over)
3773 goto nopos;
3774 di_len = btrfs_dir_name_len(leaf, di) +
3775 btrfs_dir_data_len(leaf, di) + sizeof(*di);
3776 di_cur += di_len;
3777 di = (struct btrfs_dir_item *)((char *)di + di_len);
3778 }
3779 }
3780
3781 /* Reached end of directory/root. Bump pos past the last item. */
3782 if (key_type == BTRFS_DIR_INDEX_KEY)
3783 filp->f_pos = INT_LIMIT(off_t);
3784 else
3785 filp->f_pos++;
3786 nopos:
3787 ret = 0;
3788 err:
3789 btrfs_free_path(path);
3790 return ret;
3791 }
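/*
 * Note on readdir positions (explanatory, not from the source):
 * f_pos doubles as the DIR_INDEX key offset, which is why 0 and 1
 * are reserved for '.' and '..' above and real entries start at
 * index 2. It is also why the end-of-directory case jumps f_pos to
 * INT_LIMIT(off_t) for index-keyed directories instead of simply
 * incrementing it: any smaller value could collide with the index
 * of an entry created later.
 */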
3792
3793 int btrfs_write_inode(struct inode *inode, int wait)
3794 {
3795 struct btrfs_root *root = BTRFS_I(inode)->root;
3796 struct btrfs_trans_handle *trans;
3797 int ret = 0;
3798
3799 if (root->fs_info->btree_inode == inode)
3800 return 0;
3801
3802 if (wait) {
3803 trans = btrfs_join_transaction(root, 1);
3804 btrfs_set_trans_block_group(trans, inode);
3805 ret = btrfs_commit_transaction(trans, root);
3806 }
3807 return ret;
3808 }
3809
3810 /*
3811 * This is somewhat expensive: it updates the tree every time the
3812 * inode changes. But the inode item is most likely already in cache,
3813 * so the update should be cheap. FIXME: needs more benchmarking;
3814 * there is no reason other than performance to keep or drop this code.
3815 */
3816 void btrfs_dirty_inode(struct inode *inode)
3817 {
3818 struct btrfs_root *root = BTRFS_I(inode)->root;
3819 struct btrfs_trans_handle *trans;
3820
3821 trans = btrfs_join_transaction(root, 1);
3822 btrfs_set_trans_block_group(trans, inode);
3823 btrfs_update_inode(trans, root, inode);
3824 btrfs_end_transaction(trans, root);
3825 }
3826
3827 /*
3828 * find the highest existing sequence number in a directory
3829 * and then set the in-memory index_cnt variable to the first
3830 * free sequence number
3831 */
3832 static int btrfs_set_inode_index_count(struct inode *inode)
3833 {
3834 struct btrfs_root *root = BTRFS_I(inode)->root;
3835 struct btrfs_key key, found_key;
3836 struct btrfs_path *path;
3837 struct extent_buffer *leaf;
3838 int ret;
3839
3840 key.objectid = inode->i_ino;
3841 btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY);
3842 key.offset = (u64)-1;
3843
3844 path = btrfs_alloc_path();
3845 if (!path)
3846 return -ENOMEM;
3847
3848 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3849 if (ret < 0)
3850 goto out;
3851 /* FIXME: we should be able to handle this */
3852 if (ret == 0)
3853 goto out;
3854 ret = 0;
3855
3856 /*
3857 * MAGIC NUMBER EXPLANATION:
3858 * we search the directory based on f_pos, and '.' and '..' occupy
3859 * f_pos 0 and 1 respectively, so every real entry has to start at 2
3860 */
3862 if (path->slots[0] == 0) {
3863 BTRFS_I(inode)->index_cnt = 2;
3864 goto out;
3865 }
3866
3867 path->slots[0]--;
3868
3869 leaf = path->nodes[0];
3870 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3871
3872 if (found_key.objectid != inode->i_ino ||
3873 btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) {
3874 BTRFS_I(inode)->index_cnt = 2;
3875 goto out;
3876 }
3877
3878 BTRFS_I(inode)->index_cnt = found_key.offset + 1;
3879 out:
3880 btrfs_free_path(path);
3881 return ret;
3882 }
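/*
 * Worked example (illustrative, not from the source): if the largest
 * DIR_INDEX key in a directory has offset 7, the search above lands
 * just past it, slots[0]-- steps back onto it, and index_cnt becomes
 * 8, so the next entry created in this directory gets index 8. A
 * directory with no DIR_INDEX items at all starts at 2, leaving 0
 * and 1 for '.' and '..'.
 */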
3883
3884 /*
3885 * helper to find a free sequence number in a given directory. The current
3886 * code is very simple; later versions will do smarter things in the btree
3887 */
3888 int btrfs_set_inode_index(struct inode *dir, u64 *index)
3889 {
3890 int ret = 0;
3891
3892 if (BTRFS_I(dir)->index_cnt == (u64)-1) {
3893 ret = btrfs_set_inode_index_count(dir);
3894 if (ret)
3895 return ret;
3896 }
3897
3898 *index = BTRFS_I(dir)->index_cnt;
3899 BTRFS_I(dir)->index_cnt++;
3900
3901 return ret;
3902 }
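/*
 * Minimal usage sketch (assumptions: a transaction is running and the
 * directory is locked, as in the callers below). The returned index
 * is reserved for exactly one new entry and is then handed on to
 * btrfs_add_link()/btrfs_insert_dir_item():
 *
 *	u64 index;
 *	int ret = btrfs_set_inode_index(dir, &index);
 *	if (ret)
 *		return ret;
 */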
3903
3904 static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
3905 struct btrfs_root *root,
3906 struct inode *dir,
3907 const char *name, int name_len,
3908 u64 ref_objectid, u64 objectid,
3909 u64 alloc_hint, int mode, u64 *index)
3910 {
3911 struct inode *inode;
3912 struct btrfs_inode_item *inode_item;
3913 struct btrfs_key *location;
3914 struct btrfs_path *path;
3915 struct btrfs_inode_ref *ref;
3916 struct btrfs_key key[2];
3917 u32 sizes[2];
3918 unsigned long ptr;
3919 int ret;
3920 int owner;
3921
3922 path = btrfs_alloc_path();
3923 BUG_ON(!path);
3924
3925 inode = new_inode(root->fs_info->sb);
3926 if (!inode)
3927 return ERR_PTR(-ENOMEM);
3928
3929 if (dir) {
3930 ret = btrfs_set_inode_index(dir, index);
3931 if (ret) {
3932 iput(inode);
3933 return ERR_PTR(ret);
3934 }
3935 }
3936 /*
3937 * index_cnt is ignored for everything but a dir,
3938 * btrfs_set_inode_index_count has an explanation for the magic
3939 * number
3940 */
3941 init_btrfs_i(inode);
3942 BTRFS_I(inode)->index_cnt = 2;
3943 BTRFS_I(inode)->root = root;
3944 BTRFS_I(inode)->generation = trans->transid;
3945 btrfs_set_inode_space_info(root, inode);
3946
3947 if (S_ISDIR(mode))
3948 owner = 0;
3949 else
3950 owner = 1;
3951 BTRFS_I(inode)->block_group =
3952 btrfs_find_block_group(root, 0, alloc_hint, owner);
3953
3954 key[0].objectid = objectid;
3955 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
3956 key[0].offset = 0;
3957
3958 key[1].objectid = objectid;
3959 btrfs_set_key_type(&key[1], BTRFS_INODE_REF_KEY);
3960 key[1].offset = ref_objectid;
3961
3962 sizes[0] = sizeof(struct btrfs_inode_item);
3963 sizes[1] = name_len + sizeof(*ref);
3964
3965 path->leave_spinning = 1;
3966 ret = btrfs_insert_empty_items(trans, root, path, key, sizes, 2);
3967 if (ret != 0)
3968 goto fail;
3969
3970 inode->i_uid = current_fsuid();
3971
3972 if (dir && (dir->i_mode & S_ISGID)) {
3973 inode->i_gid = dir->i_gid;
3974 if (S_ISDIR(mode))
3975 mode |= S_ISGID;
3976 } else
3977 inode->i_gid = current_fsgid();
3978
3979 inode->i_mode = mode;
3980 inode->i_ino = objectid;
3981 inode_set_bytes(inode, 0);
3982 inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
3983 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3984 struct btrfs_inode_item);
3985 fill_inode_item(trans, path->nodes[0], inode_item, inode);
3986
3987 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3988 struct btrfs_inode_ref);
3989 btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
3990 btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
3991 ptr = (unsigned long)(ref + 1);
3992 write_extent_buffer(path->nodes[0], name, ptr, name_len);
3993
3994 btrfs_mark_buffer_dirty(path->nodes[0]);
3995 btrfs_free_path(path);
3996
3997 location = &BTRFS_I(inode)->location;
3998 location->objectid = objectid;
3999 location->offset = 0;
4000 btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY);
4001
4002 btrfs_inherit_iflags(inode, dir);
4003
4004 if (S_ISREG(mode)) {
4005 if (btrfs_test_opt(root, NODATASUM))
4006 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
4007 if (btrfs_test_opt(root, NODATACOW))
4008 BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW;
4009 }
4010
4011 insert_inode_hash(inode);
4012 inode_tree_add(inode);
4013 return inode;
4014 fail:
4015 if (dir)
4016 BTRFS_I(dir)->index_cnt--;
4017 btrfs_free_path(path);
4018 iput(inode);
4019 return ERR_PTR(ret);
4020 }
4021
4022 static inline u8 btrfs_inode_type(struct inode *inode)
4023 {
4024 return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
4025 }
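/*
 * Illustrative example (not in the original): for a regular file,
 * (i_mode & S_IFMT) >> S_SHIFT is S_IFREG >> 12 == 8, which indexes
 * btrfs_type_by_mode to BTRFS_FT_REG_FILE; btrfs_filetype_table
 * above performs the inverse mapping back to DT_REG for readdir.
 */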
4026
4027 /*
4028 * utility function to add 'inode' into 'parent_inode' with
4029 * a given name and a given sequence number.
4030 * if 'add_backref' is true, also insert a backref from the
4031 * inode to the parent directory.
4032 */
4033 int btrfs_add_link(struct btrfs_trans_handle *trans,
4034 struct inode *parent_inode, struct inode *inode,
4035 const char *name, int name_len, int add_backref, u64 index)
4036 {
4037 int ret = 0;
4038 struct btrfs_key key;
4039 struct btrfs_root *root = BTRFS_I(parent_inode)->root;
4040
4041 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4042 memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key));
4043 } else {
4044 key.objectid = inode->i_ino;
4045 btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
4046 key.offset = 0;
4047 }
4048
4049 if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
4050 ret = btrfs_add_root_ref(trans, root->fs_info->tree_root,
4051 key.objectid, root->root_key.objectid,
4052 parent_inode->i_ino,
4053 index, name, name_len);
4054 } else if (add_backref) {
4055 ret = btrfs_insert_inode_ref(trans, root,
4056 name, name_len, inode->i_ino,
4057 parent_inode->i_ino, index);
4058 }
4059
4060 if (ret == 0) {
4061 ret = btrfs_insert_dir_item(trans, root, name, name_len,
4062 parent_inode->i_ino, &key,
4063 btrfs_inode_type(inode), index);
4064 BUG_ON(ret);
4065
4066 btrfs_i_size_write(parent_inode, parent_inode->i_size +
4067 name_len * 2);
4068 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4069 ret = btrfs_update_inode(trans, root, parent_inode);
4070 }
4071 return ret;
4072 }
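/*
 * Note on the size accounting above (explanatory): directory i_size
 * in btrfs is the sum of the names stored in it, and every link
 * records its name twice, once in the DIR_ITEM and once in the
 * DIR_INDEX entry, hence the 'name_len * 2'.
 */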
4073
4074 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
4075 struct dentry *dentry, struct inode *inode,
4076 int backref, u64 index)
4077 {
4078 int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4079 inode, dentry->d_name.name,
4080 dentry->d_name.len, backref, index);
4081 if (!err) {
4082 d_instantiate(dentry, inode);
4083 return 0;
4084 }
4085 if (err > 0)
4086 err = -EEXIST;
4087 return err;
4088 }
4089
4090 static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4091 int mode, dev_t rdev)
4092 {
4093 struct btrfs_trans_handle *trans;
4094 struct btrfs_root *root = BTRFS_I(dir)->root;
4095 struct inode *inode = NULL;
4096 int err;
4097 int drop_inode = 0;
4098 u64 objectid;
4099 unsigned long nr = 0;
4100 u64 index = 0;
4101
4102 if (!new_valid_dev(rdev))
4103 return -EINVAL;
4104
4105 /*
4106 * 2 for inode item and ref
4107 * 2 for dir items
4108 * 1 for xattr if selinux is on
4109 */
4110 err = btrfs_reserve_metadata_space(root, 5);
4111 if (err)
4112 return err;
4113
4114 trans = btrfs_start_transaction(root, 1);
if (!trans) {
err = -ENOMEM;
goto fail;
}
4117 btrfs_set_trans_block_group(trans, dir);
4118
4119 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4120 if (err) {
4121 err = -ENOSPC;
4122 goto out_unlock;
4123 }
4124
4125 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4126 dentry->d_name.len,
4127 dentry->d_parent->d_inode->i_ino, objectid,
4128 BTRFS_I(dir)->block_group, mode, &index);
4129 err = PTR_ERR(inode);
4130 if (IS_ERR(inode))
4131 goto out_unlock;
4132
4133 err = btrfs_init_inode_security(inode, dir);
4134 if (err) {
4135 drop_inode = 1;
4136 goto out_unlock;
4137 }
4138
4139 btrfs_set_trans_block_group(trans, inode);
4140 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4141 if (err)
4142 drop_inode = 1;
4143 else {
4144 inode->i_op = &btrfs_special_inode_operations;
4145 init_special_inode(inode, inode->i_mode, rdev);
4146 btrfs_update_inode(trans, root, inode);
4147 }
4148 btrfs_update_inode_block_group(trans, inode);
4149 btrfs_update_inode_block_group(trans, dir);
4150 out_unlock:
4151 nr = trans->blocks_used;
4152 btrfs_end_transaction_throttle(trans, root);
4153 fail:
4154 btrfs_unreserve_metadata_space(root, 5);
4155 if (drop_inode) {
4156 inode_dec_link_count(inode);
4157 iput(inode);
4158 }
4159 btrfs_btree_balance_dirty(root, nr);
4160 return err;
4161 }
4162
4163 static int btrfs_create(struct inode *dir, struct dentry *dentry,
4164 int mode, struct nameidata *nd)
4165 {
4166 struct btrfs_trans_handle *trans;
4167 struct btrfs_root *root = BTRFS_I(dir)->root;
4168 struct inode *inode = NULL;
4169 int err;
4170 int drop_inode = 0;
4171 unsigned long nr = 0;
4172 u64 objectid;
4173 u64 index = 0;
4174
4175 /*
4176 * 2 for inode item and ref
4177 * 2 for dir items
4178 * 1 for xattr if selinux is on
4179 */
4180 err = btrfs_reserve_metadata_space(root, 5);
4181 if (err)
4182 return err;
4183
4184 trans = btrfs_start_transaction(root, 1);
if (!trans) {
err = -ENOMEM;
goto fail;
}
4187 btrfs_set_trans_block_group(trans, dir);
4188
4189 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4190 if (err) {
4191 err = -ENOSPC;
4192 goto out_unlock;
4193 }
4194
4195 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4196 dentry->d_name.len,
4197 dentry->d_parent->d_inode->i_ino,
4198 objectid, BTRFS_I(dir)->block_group, mode,
4199 &index);
4200 err = PTR_ERR(inode);
4201 if (IS_ERR(inode))
4202 goto out_unlock;
4203
4204 err = btrfs_init_inode_security(inode, dir);
4205 if (err) {
4206 drop_inode = 1;
4207 goto out_unlock;
4208 }
4209
4210 btrfs_set_trans_block_group(trans, inode);
4211 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
4212 if (err)
4213 drop_inode = 1;
4214 else {
4215 inode->i_mapping->a_ops = &btrfs_aops;
4216 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
4217 inode->i_fop = &btrfs_file_operations;
4218 inode->i_op = &btrfs_file_inode_operations;
4219 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4220 }
4221 btrfs_update_inode_block_group(trans, inode);
4222 btrfs_update_inode_block_group(trans, dir);
4223 out_unlock:
4224 nr = trans->blocks_used;
4225 btrfs_end_transaction_throttle(trans, root);
4226 fail:
4227 btrfs_unreserve_metadata_space(root, 5);
4228 if (drop_inode) {
4229 inode_dec_link_count(inode);
4230 iput(inode);
4231 }
4232 btrfs_btree_balance_dirty(root, nr);
4233 return err;
4234 }
4235
4236 static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4237 struct dentry *dentry)
4238 {
4239 struct btrfs_trans_handle *trans;
4240 struct btrfs_root *root = BTRFS_I(dir)->root;
4241 struct inode *inode = old_dentry->d_inode;
4242 u64 index;
4243 unsigned long nr = 0;
4244 int err;
4245 int drop_inode = 0;
4246
4247 if (inode->i_nlink == 0)
4248 return -ENOENT;
4249
4250 /*
4251 * 1 item for inode ref
4252 * 2 items for dir items
4253 */
4254 err = btrfs_reserve_metadata_space(root, 3);
4255 if (err)
4256 return err;
4257
4258 btrfs_inc_nlink(inode);
4259
4260 err = btrfs_set_inode_index(dir, &index);
4261 if (err)
4262 goto fail;
4263
4264 trans = btrfs_start_transaction(root, 1);
4265
4266 btrfs_set_trans_block_group(trans, dir);
4267 atomic_inc(&inode->i_count);
4268
4269 err = btrfs_add_nondir(trans, dentry, inode, 1, index);
4270
4271 if (err) {
4272 drop_inode = 1;
4273 } else {
4274 btrfs_update_inode_block_group(trans, dir);
4275 err = btrfs_update_inode(trans, root, inode);
4276 BUG_ON(err);
4277 btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
4278 }
4279
4280 nr = trans->blocks_used;
4281 btrfs_end_transaction_throttle(trans, root);
4282 fail:
4283 btrfs_unreserve_metadata_space(root, 3);
4284 if (drop_inode) {
4285 inode_dec_link_count(inode);
4286 iput(inode);
4287 }
4288 btrfs_btree_balance_dirty(root, nr);
4289 return err;
4290 }
4291
4292 static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4293 {
4294 struct inode *inode = NULL;
4295 struct btrfs_trans_handle *trans;
4296 struct btrfs_root *root = BTRFS_I(dir)->root;
4297 int err = 0;
4298 int drop_on_err = 0;
4299 u64 objectid = 0;
4300 u64 index = 0;
4301 unsigned long nr = 1;
4302
4303 /*
4304 * 2 items for inode and ref
4305 * 2 items for dir items
4306 * 1 for xattr if selinux is on
4307 */
4308 err = btrfs_reserve_metadata_space(root, 5);
4309 if (err)
4310 return err;
4311
4312 trans = btrfs_start_transaction(root, 1);
4313 if (!trans) {
4314 err = -ENOMEM;
4315 goto out_unlock;
4316 }
4317 btrfs_set_trans_block_group(trans, dir);
4318
4319 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
4320 if (err) {
4321 err = -ENOSPC;
4322 goto out_unlock;
4323 }
4324
4325 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4326 dentry->d_name.len,
4327 dentry->d_parent->d_inode->i_ino, objectid,
4328 BTRFS_I(dir)->block_group, S_IFDIR | mode,
4329 &index);
4330 if (IS_ERR(inode)) {
4331 err = PTR_ERR(inode);
4332 goto out_fail;
4333 }
4334
4335 drop_on_err = 1;
4336
4337 err = btrfs_init_inode_security(inode, dir);
4338 if (err)
4339 goto out_fail;
4340
4341 inode->i_op = &btrfs_dir_inode_operations;
4342 inode->i_fop = &btrfs_dir_file_operations;
4343 btrfs_set_trans_block_group(trans, inode);
4344
4345 btrfs_i_size_write(inode, 0);
4346 err = btrfs_update_inode(trans, root, inode);
4347 if (err)
4348 goto out_fail;
4349
4350 err = btrfs_add_link(trans, dentry->d_parent->d_inode,
4351 inode, dentry->d_name.name,
4352 dentry->d_name.len, 0, index);
4353 if (err)
4354 goto out_fail;
4355
4356 d_instantiate(dentry, inode);
4357 drop_on_err = 0;
4358 btrfs_update_inode_block_group(trans, inode);
4359 btrfs_update_inode_block_group(trans, dir);
4360
4361 out_fail:
4362 nr = trans->blocks_used;
4363 btrfs_end_transaction_throttle(trans, root);
4364
4365 out_unlock:
4366 btrfs_unreserve_metadata_space(root, 5);
4367 if (drop_on_err)
4368 iput(inode);
4369 btrfs_btree_balance_dirty(root, nr);
4370 return err;
4371 }
4372
4373 /* helper for btrfs_get_extent. Given an existing extent in the tree,
4374 * and an extent that you want to insert, deal with overlap and insert
4375 * the new extent into the tree.
4376 */
4377 static int merge_extent_mapping(struct extent_map_tree *em_tree,
4378 struct extent_map *existing,
4379 struct extent_map *em,
4380 u64 map_start, u64 map_len)
4381 {
4382 u64 start_diff;
4383
4384 BUG_ON(map_start < em->start || map_start >= extent_map_end(em));
4385 start_diff = map_start - em->start;
4386 em->start = map_start;
4387 em->len = map_len;
4388 if (em->block_start < EXTENT_MAP_LAST_BYTE &&
4389 !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
4390 em->block_start += start_diff;
4391 em->block_len -= start_diff;
4392 }
4393 return add_extent_mapping(em_tree, em);
4394 }
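/*
 * Worked example (illustrative): say 'em' was built for [0, 16k) with
 * block_start B, but [0, 8k) is already covered by 'existing', so the
 * caller asks for the uncovered piece at map_start = 8k. start_diff
 * is 8k, em is trimmed to start at 8k with length map_len, and for a
 * regular (uncompressed) extent block_start is advanced to B + 8k so
 * file and disk offsets stay in sync. Compressed extents keep their
 * block range untouched because they always reference the entire
 * compressed blob on disk.
 */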
4395
4396 static noinline int uncompress_inline(struct btrfs_path *path,
4397 struct inode *inode, struct page *page,
4398 size_t pg_offset, u64 extent_offset,
4399 struct btrfs_file_extent_item *item)
4400 {
4401 int ret;
4402 struct extent_buffer *leaf = path->nodes[0];
4403 char *tmp;
4404 size_t max_size;
4405 unsigned long inline_size;
4406 unsigned long ptr;
4407
4408 WARN_ON(pg_offset != 0);
4409 max_size = btrfs_file_extent_ram_bytes(leaf, item);
4410 inline_size = btrfs_file_extent_inline_item_len(leaf,
4411 btrfs_item_nr(leaf, path->slots[0]));
4412 tmp = kmalloc(inline_size, GFP_NOFS);
if (!tmp)
return -ENOMEM;
4413 ptr = btrfs_file_extent_inline_start(item);
4414
4415 read_extent_buffer(leaf, tmp, ptr, inline_size);
4416
4417 max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
4418 ret = btrfs_zlib_decompress(tmp, page, extent_offset,
4419 inline_size, max_size);
4420 if (ret) {
4421 char *kaddr = kmap_atomic(page, KM_USER0);
4422 unsigned long copy_size = min_t(u64,
4423 PAGE_CACHE_SIZE - pg_offset,
4424 max_size - extent_offset);
4425 memset(kaddr + pg_offset, 0, copy_size);
4426 kunmap_atomic(kaddr, KM_USER0);
4427 }
4428 kfree(tmp);
4429 return 0;
4430 }
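/*
 * Explanatory note: inline extents always start at the beginning of
 * the page (hence the WARN_ON on pg_offset), and a failed decompress
 * is papered over by zeroing the destination range and still
 * returning success, so the reader sees zeros rather than an IO
 * error.
 */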
4431
4432 /*
4433 * a bit scary, this does extent mapping from logical file offset to the disk.
4434 * the ugly parts come from merging extents from the disk with the in-ram
4435 * representation. This gets more complex because of the data=ordered code,
4436 * where the in-ram extents might be locked pending data=ordered completion.
4437 *
4438 * This also copies inline extents directly into the page.
4439 */
4440
4441 struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
4442 size_t pg_offset, u64 start, u64 len,
4443 int create)
4444 {
4445 int ret;
4446 int err = 0;
4447 u64 bytenr;
4448 u64 extent_start = 0;
4449 u64 extent_end = 0;
4450 u64 objectid = inode->i_ino;
4451 u32 found_type;
4452 struct btrfs_path *path = NULL;
4453 struct btrfs_root *root = BTRFS_I(inode)->root;
4454 struct btrfs_file_extent_item *item;
4455 struct extent_buffer *leaf;
4456 struct btrfs_key found_key;
4457 struct extent_map *em = NULL;
4458 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
4459 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4460 struct btrfs_trans_handle *trans = NULL;
4461 int compressed;
4462
4463 again:
4464 read_lock(&em_tree->lock);
4465 em = lookup_extent_mapping(em_tree, start, len);
4466 if (em)
4467 em->bdev = root->fs_info->fs_devices->latest_bdev;
4468 read_unlock(&em_tree->lock);
4469
4470 if (em) {
4471 if (em->start > start || em->start + em->len <= start)
4472 free_extent_map(em);
4473 else if (em->block_start == EXTENT_MAP_INLINE && page)
4474 free_extent_map(em);
4475 else
4476 goto out;
4477 }
4478 em = alloc_extent_map(GFP_NOFS);
4479 if (!em) {
4480 err = -ENOMEM;
4481 goto out;
4482 }
4483 em->bdev = root->fs_info->fs_devices->latest_bdev;
4484 em->start = EXTENT_MAP_HOLE;
4485 em->orig_start = EXTENT_MAP_HOLE;
4486 em->len = (u64)-1;
4487 em->block_len = (u64)-1;
4488
4489 if (!path) {
4490 path = btrfs_alloc_path();
4491 BUG_ON(!path);
4492 }
4493
4494 ret = btrfs_lookup_file_extent(trans, root, path,
4495 objectid, start, trans != NULL);
4496 if (ret < 0) {
4497 err = ret;
4498 goto out;
4499 }
4500
4501 if (ret != 0) {
4502 if (path->slots[0] == 0)
4503 goto not_found;
4504 path->slots[0]--;
4505 }
4506
4507 leaf = path->nodes[0];
4508 item = btrfs_item_ptr(leaf, path->slots[0],
4509 struct btrfs_file_extent_item);
4510 /* are we inside the extent that was found? */
4511 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4512 found_type = btrfs_key_type(&found_key);
4513 if (found_key.objectid != objectid ||
4514 found_type != BTRFS_EXTENT_DATA_KEY) {
4515 goto not_found;
4516 }
4517
4518 found_type = btrfs_file_extent_type(leaf, item);
4519 extent_start = found_key.offset;
4520 compressed = btrfs_file_extent_compression(leaf, item);
4521 if (found_type == BTRFS_FILE_EXTENT_REG ||
4522 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4523 extent_end = extent_start +
4524 btrfs_file_extent_num_bytes(leaf, item);
4525 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4526 size_t size;
4527 size = btrfs_file_extent_inline_len(leaf, item);
4528 extent_end = (extent_start + size + root->sectorsize - 1) &
4529 ~((u64)root->sectorsize - 1);
4530 }
4531
4532 if (start >= extent_end) {
4533 path->slots[0]++;
4534 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
4535 ret = btrfs_next_leaf(root, path);
4536 if (ret < 0) {
4537 err = ret;
4538 goto out;
4539 }
4540 if (ret > 0)
4541 goto not_found;
4542 leaf = path->nodes[0];
4543 }
4544 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4545 if (found_key.objectid != objectid ||
4546 found_key.type != BTRFS_EXTENT_DATA_KEY)
4547 goto not_found;
4548 if (start + len <= found_key.offset)
4549 goto not_found;
4550 em->start = start;
4551 em->len = found_key.offset - start;
4552 goto not_found_em;
4553 }
4554
4555 if (found_type == BTRFS_FILE_EXTENT_REG ||
4556 found_type == BTRFS_FILE_EXTENT_PREALLOC) {
4557 em->start = extent_start;
4558 em->len = extent_end - extent_start;
4559 em->orig_start = extent_start -
4560 btrfs_file_extent_offset(leaf, item);
4561 bytenr = btrfs_file_extent_disk_bytenr(leaf, item);
4562 if (bytenr == 0) {
4563 em->block_start = EXTENT_MAP_HOLE;
4564 goto insert;
4565 }
4566 if (compressed) {
4567 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4568 em->block_start = bytenr;
4569 em->block_len = btrfs_file_extent_disk_num_bytes(leaf,
4570 item);
4571 } else {
4572 bytenr += btrfs_file_extent_offset(leaf, item);
4573 em->block_start = bytenr;
4574 em->block_len = em->len;
4575 if (found_type == BTRFS_FILE_EXTENT_PREALLOC)
4576 set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
4577 }
4578 goto insert;
4579 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
4580 unsigned long ptr;
4581 char *map;
4582 size_t size;
4583 size_t extent_offset;
4584 size_t copy_size;
4585
4586 em->block_start = EXTENT_MAP_INLINE;
4587 if (!page || create) {
4588 em->start = extent_start;
4589 em->len = extent_end - extent_start;
4590 goto out;
4591 }
4592
4593 size = btrfs_file_extent_inline_len(leaf, item);
4594 extent_offset = page_offset(page) + pg_offset - extent_start;
4595 copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset,
4596 size - extent_offset);
4597 em->start = extent_start + extent_offset;
4598 em->len = (copy_size + root->sectorsize - 1) &
4599 ~((u64)root->sectorsize - 1);
4600 em->orig_start = EXTENT_MAP_INLINE;
4601 if (compressed)
4602 set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
4603 ptr = btrfs_file_extent_inline_start(item) + extent_offset;
4604 if (create == 0 && !PageUptodate(page)) {
4605 if (btrfs_file_extent_compression(leaf, item) ==
4606 BTRFS_COMPRESS_ZLIB) {
4607 ret = uncompress_inline(path, inode, page,
4608 pg_offset,
4609 extent_offset, item);
4610 BUG_ON(ret);
4611 } else {
4612 map = kmap(page);
4613 read_extent_buffer(leaf, map + pg_offset, ptr,
4614 copy_size);
4615 if (pg_offset + copy_size < PAGE_CACHE_SIZE) {
4616 memset(map + pg_offset + copy_size, 0,
4617 PAGE_CACHE_SIZE - pg_offset -
4618 copy_size);
4619 }
4620 kunmap(page);
4621 }
4622 flush_dcache_page(page);
4623 } else if (create && PageUptodate(page)) {
4624 if (!trans) {
/* nothing has been kmapped yet on this path, so no kunmap here */
4626 free_extent_map(em);
4627 em = NULL;
4628 btrfs_release_path(root, path);
4629 trans = btrfs_join_transaction(root, 1);
4630 goto again;
4631 }
4632 map = kmap(page);
4633 write_extent_buffer(leaf, map + pg_offset, ptr,
4634 copy_size);
4635 kunmap(page);
4636 btrfs_mark_buffer_dirty(leaf);
4637 }
4638 set_extent_uptodate(io_tree, em->start,
4639 extent_map_end(em) - 1, GFP_NOFS);
4640 goto insert;
4641 } else {
4642 printk(KERN_ERR "btrfs unknown found_type %d\n", found_type);
4643 WARN_ON(1);
4644 }
4645 not_found:
4646 em->start = start;
4647 em->len = len;
4648 not_found_em:
4649 em->block_start = EXTENT_MAP_HOLE;
4650 set_bit(EXTENT_FLAG_VACANCY, &em->flags);
4651 insert:
4652 btrfs_release_path(root, path);
4653 if (em->start > start || extent_map_end(em) <= start) {
4654 printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed "
4655 "[%llu %llu]\n", (unsigned long long)em->start,
4656 (unsigned long long)em->len,
4657 (unsigned long long)start,
4658 (unsigned long long)len);
4659 err = -EIO;
4660 goto out;
4661 }
4662
4663 err = 0;
4664 write_lock(&em_tree->lock);
4665 ret = add_extent_mapping(em_tree, em);
4666 /* it is possible that someone inserted the extent into the tree
4667 * while we had the lock dropped. It is also possible that
4668 * an overlapping map exists in the tree
4669 */
4670 if (ret == -EEXIST) {
4671 struct extent_map *existing;
4672
4673 ret = 0;
4674
4675 existing = lookup_extent_mapping(em_tree, start, len);
4676 if (existing && (existing->start > start ||
4677 existing->start + existing->len <= start)) {
4678 free_extent_map(existing);
4679 existing = NULL;
4680 }
4681 if (!existing) {
4682 existing = lookup_extent_mapping(em_tree, em->start,
4683 em->len);
4684 if (existing) {
4685 err = merge_extent_mapping(em_tree, existing,
4686 em, start,
4687 root->sectorsize);
4688 free_extent_map(existing);
4689 if (err) {
4690 free_extent_map(em);
4691 em = NULL;
4692 }
4693 } else {
4694 err = -EIO;
4695 free_extent_map(em);
4696 em = NULL;
4697 }
4698 } else {
4699 free_extent_map(em);
4700 em = existing;
4701 err = 0;
4702 }
4703 }
4704 write_unlock(&em_tree->lock);
4705 out:
4706 if (path)
4707 btrfs_free_path(path);
4708 if (trans) {
4709 ret = btrfs_end_transaction(trans, root);
4710 if (!err)
4711 err = ret;
4712 }
4713 if (err) {
4714 free_extent_map(em);
4715 return ERR_PTR(err);
4716 }
4717 return em;
4718 }
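/*
 * Minimal caller sketch (an illustration, not from the source):
 * mapping a byte range for reading, with no page and create == 0:
 *
 *	struct extent_map *em;
 *
 *	em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	if (em->block_start == EXTENT_MAP_HOLE)
 *		handle_hole();	(hypothetical helper: treat as zeros)
 *	free_extent_map(em);
 */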
4719
4720 static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
4721 const struct iovec *iov, loff_t offset,
4722 unsigned long nr_segs)
4723 {
/* O_DIRECT is not supported yet; fail any attempt */
4724 return -EINVAL;
4725 }
4726
4727 static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4728 __u64 start, __u64 len)
4729 {
4730 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
4731 }
4732
4733 int btrfs_readpage(struct file *file, struct page *page)
4734 {
4735 struct extent_io_tree *tree;
4736 tree = &BTRFS_I(page->mapping->host)->io_tree;
4737 return extent_read_full_page(tree, page, btrfs_get_extent);
4738 }
4739
4740 static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
4741 {
4742 struct extent_io_tree *tree;
4743
/*
* writepage can be called from direct reclaim (PF_MEMALLOC);
* starting IO or transactions there could recurse into the
* filesystem, so just redirty the page and let regular
* writeback handle it
*/
4745 if (current->flags & PF_MEMALLOC) {
4746 redirty_page_for_writepage(wbc, page);
4747 unlock_page(page);
4748 return 0;
4749 }
4750 tree = &BTRFS_I(page->mapping->host)->io_tree;
4751 return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
4752 }
4753
4754 int btrfs_writepages(struct address_space *mapping,
4755 struct writeback_control *wbc)
4756 {
4757 struct extent_io_tree *tree;
4758
4759 tree = &BTRFS_I(mapping->host)->io_tree;
4760 return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
4761 }
4762
4763 static int
4764 btrfs_readpages(struct file *file, struct address_space *mapping,
4765 struct list_head *pages, unsigned nr_pages)
4766 {
4767 struct extent_io_tree *tree;
4768 tree = &BTRFS_I(mapping->host)->io_tree;
4769 return extent_readpages(tree, mapping, pages, nr_pages,
4770 btrfs_get_extent);
4771 }

4772 static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4773 {
4774 struct extent_io_tree *tree;
4775 struct extent_map_tree *map;
4776 int ret;
4777
4778 tree = &BTRFS_I(page->mapping->host)->io_tree;
4779 map = &BTRFS_I(page->mapping->host)->extent_tree;
4780 ret = try_release_extent_mapping(map, tree, page, gfp_flags);
4781 if (ret == 1) {
4782 ClearPagePrivate(page);
4783 set_page_private(page, 0);
4784 page_cache_release(page);
4785 }
4786 return ret;
4787 }
4788
4789 static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
4790 {
4791 if (PageWriteback(page) || PageDirty(page))
4792 return 0;
4793 return __btrfs_releasepage(page, gfp_flags & GFP_NOFS);
4794 }
4795
4796 static void btrfs_invalidatepage(struct page *page, unsigned long offset)
4797 {
4798 struct extent_io_tree *tree;
4799 struct btrfs_ordered_extent *ordered;
4800 u64 page_start = page_offset(page);
4801 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
4802
4804 /*
4805 * we have the page locked, so new writeback can't start,
4806 * and the dirty bit won't be cleared while we are here.
4807 *
4808 * Wait for IO on this page so that we can safely clear
4809 * the PagePrivate2 bit and do ordered accounting
4810 */
4811 wait_on_page_writeback(page);
4812
4813 tree = &BTRFS_I(page->mapping->host)->io_tree;
4814 if (offset) {
4815 btrfs_releasepage(page, GFP_NOFS);
4816 return;
4817 }
4818 lock_extent(tree, page_start, page_end, GFP_NOFS);
4819 ordered = btrfs_lookup_ordered_extent(page->mapping->host,
4820 page_offset(page));
4821 if (ordered) {
4822 /*
4823 * IO on this page will never be started, so we need
4824 * to account for any ordered extents now
4825 */
4826 clear_extent_bit(tree, page_start, page_end,
4827 EXTENT_DIRTY | EXTENT_DELALLOC |
4828 EXTENT_LOCKED, 1, 0, NULL, GFP_NOFS);
4829 /*
4830 * whoever cleared the private bit is responsible
4831 * for the finish_ordered_io
4832 */
4833 if (TestClearPagePrivate2(page)) {
4834 btrfs_finish_ordered_io(page->mapping->host,
4835 page_start, page_end);
4836 }
4837 btrfs_put_ordered_extent(ordered);
4838 lock_extent(tree, page_start, page_end, GFP_NOFS);
4839 }
4840 clear_extent_bit(tree, page_start, page_end,
4841 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
4842 1, 1, NULL, GFP_NOFS);
4843 __btrfs_releasepage(page, GFP_NOFS);
4844
4845 ClearPageChecked(page);
4846 if (PagePrivate(page)) {
4847 ClearPagePrivate(page);
4848 set_page_private(page, 0);
4849 page_cache_release(page);
4850 }
4851 }
4852
4853 /*
4854 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
4855 * called from a page fault handler when a page is first dirtied. Hence we must
4856 * be careful to check for EOF conditions here. We set the page up correctly
4857 * for a written page which means we get ENOSPC checking when writing into
4858 * holes and correct delalloc and unwritten extent mapping on filesystems that
4859 * support these features.
4860 *
4861 * We are not allowed to take the i_mutex here so we have to play games to
4862 * protect against truncate races as the page could now be beyond EOF. Because
4863 * vmtruncate() writes the inode size before removing pages, once we have the
4864 * page lock we can determine safely if the page is beyond EOF. If it is not
4865 * beyond EOF, then the page is guaranteed safe against truncation until we
4866 * unlock the page.
4867 */
4868 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4869 {
4870 struct page *page = vmf->page;
4871 struct inode *inode = fdentry(vma->vm_file)->d_inode;
4872 struct btrfs_root *root = BTRFS_I(inode)->root;
4873 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4874 struct btrfs_ordered_extent *ordered;
4875 char *kaddr;
4876 unsigned long zero_start;
4877 loff_t size;
4878 int ret;
4879 u64 page_start;
4880 u64 page_end;
4881
4882 ret = btrfs_check_data_free_space(root, inode, PAGE_CACHE_SIZE);
4883 if (ret) {
4884 if (ret == -ENOMEM)
4885 ret = VM_FAULT_OOM;
4886 else /* -ENOSPC, -EIO, etc */
4887 ret = VM_FAULT_SIGBUS;
4888 goto out;
4889 }
4890
4891 ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
4892 if (ret) {
4893 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4894 ret = VM_FAULT_SIGBUS;
4895 goto out;
4896 }
4897
4898 ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
4899 again:
4900 lock_page(page);
4901 size = i_size_read(inode);
4902 page_start = page_offset(page);
4903 page_end = page_start + PAGE_CACHE_SIZE - 1;
4904
4905 if ((page->mapping != inode->i_mapping) ||
4906 (page_start >= size)) {
4907 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4908 /* page got truncated out from underneath us */
4909 goto out_unlock;
4910 }
4911 wait_on_page_writeback(page);
4912
4913 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4914 set_page_extent_mapped(page);
4915
4916 /*
4917 * we can't set the delalloc bits if there are pending ordered
4918 * extents. Drop our locks and wait for them to finish
4919 */
4920 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4921 if (ordered) {
4922 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4923 unlock_page(page);
4924 btrfs_start_ordered_extent(inode, ordered, 1);
4925 btrfs_put_ordered_extent(ordered);
4926 goto again;
4927 }
4928
4929 /*
4930 * XXX - page_mkwrite gets called every time the page is dirtied, even
4931 * if it was already dirty, so for space accounting reasons we need to
4932 * clear any delalloc bits for the range we are fixing to save. There
4933 * is probably a better way to do this, but for now keep consistent with
4934 * prepare_pages in the normal write path.
4935 */
4936 clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
4937 EXTENT_DIRTY | EXTENT_DELALLOC, GFP_NOFS);
4938
4939 ret = btrfs_set_extent_delalloc(inode, page_start, page_end);
4940 if (ret) {
4941 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4942 ret = VM_FAULT_SIGBUS;
4943 btrfs_free_reserved_data_space(root, inode, PAGE_CACHE_SIZE);
4944 goto out_unlock;
4945 }
4946 ret = 0;
4947
4948 /* page is wholly or partially inside EOF */
4949 if (page_start + PAGE_CACHE_SIZE > size)
4950 zero_start = size & ~PAGE_CACHE_MASK;
4951 else
4952 zero_start = PAGE_CACHE_SIZE;
4953
4954 if (zero_start != PAGE_CACHE_SIZE) {
4955 kaddr = kmap(page);
4956 memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start);
4957 flush_dcache_page(page);
4958 kunmap(page);
4959 }
4960 ClearPageChecked(page);
4961 set_page_dirty(page);
4962 SetPageUptodate(page);
4963
4964 BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
4965 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4966
4967 out_unlock:
4968 btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
4969 if (!ret)
4970 return VM_FAULT_LOCKED;
4971 unlock_page(page);
4972 out:
4973 return ret;
4974 }
4975
4976 static void btrfs_truncate(struct inode *inode)
4977 {
4978 struct btrfs_root *root = BTRFS_I(inode)->root;
4979 int ret;
4980 struct btrfs_trans_handle *trans;
4981 unsigned long nr;
4982 u64 mask = root->sectorsize - 1;
4983
4984 if (!S_ISREG(inode->i_mode))
4985 return;
4986 if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
4987 return;
4988
4989 btrfs_truncate_page(inode->i_mapping, inode->i_size);
4990 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
4991
4992 trans = btrfs_start_transaction(root, 1);
4993
4994 /*
4995 * setattr is responsible for setting the ordered_data_close flag,
4996 * but that is only tested during the last file release. That
4997 * could happen well after the next commit, leaving a great big
4998 * window where new writes may get lost if someone chooses to write
4999 * to this file after truncating to zero
5000 *
5001 * The inode doesn't have any dirty data here, and so if we commit
5002 * this is a noop. If someone immediately starts writing to the inode
5003 * it is very likely we'll catch some of their writes in this
5004 * transaction, and the commit will find this file on the ordered
5005 * data list with good things to send down.
5006 *
5007 * This is a best effort solution, there is still a window where
5008 * using truncate to replace the contents of the file will
5009 * end up with a zero length file after a crash.
5010 */
5011 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close)
5012 btrfs_add_ordered_operation(trans, root, inode);
5013
5014 btrfs_set_trans_block_group(trans, inode);
5015 btrfs_i_size_write(inode, inode->i_size);
5016
5017 ret = btrfs_orphan_add(trans, inode);
5018 if (ret)
5019 goto out;
5020 /* FIXME, add redo link to tree so we don't leak on crash */
5021 ret = btrfs_truncate_inode_items(trans, root, inode, inode->i_size,
5022 BTRFS_EXTENT_DATA_KEY);
5023 btrfs_update_inode(trans, root, inode);
5024
5025 ret = btrfs_orphan_del(trans, inode);
5026 BUG_ON(ret);
5027
5028 out:
5029 nr = trans->blocks_used;
5030 ret = btrfs_end_transaction_throttle(trans, root);
5031 BUG_ON(ret);
5032 btrfs_btree_balance_dirty(root, nr);
5033 }
5034
5035 /*
5036 * create a new subvolume directory/inode (helper for the ioctl).
5037 */
5038 int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
5039 struct btrfs_root *new_root,
5040 u64 new_dirid, u64 alloc_hint)
5041 {
5042 struct inode *inode;
5043 int err;
5044 u64 index = 0;
5045
5046 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
5047 new_dirid, alloc_hint, S_IFDIR | 0700, &index);
5048 if (IS_ERR(inode))
5049 return PTR_ERR(inode);
5050 inode->i_op = &btrfs_dir_inode_operations;
5051 inode->i_fop = &btrfs_dir_file_operations;
5052
5053 inode->i_nlink = 1;
5054 btrfs_i_size_write(inode, 0);
5055
5056 err = btrfs_update_inode(trans, new_root, inode);
5057 BUG_ON(err);
5058
5059 iput(inode);
5060 return 0;
5061 }
5062
5063 /* helper function for file defrag and space balancing. This
5064 * forces readahead on a given range of bytes in an inode
5065 */
5066 unsigned long btrfs_force_ra(struct address_space *mapping,
5067 struct file_ra_state *ra, struct file *file,
5068 pgoff_t offset, pgoff_t last_index)
5069 {
5070 pgoff_t req_size = last_index - offset + 1;
5071
5072 page_cache_sync_readahead(mapping, ra, file, offset, req_size);
5073 return offset + req_size;
5074 }
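/*
 * Usage sketch (hypothetical caller, mirroring the defrag ioctl):
 *
 *	pgoff_t next;
 *
 *	next = btrfs_force_ra(inode->i_mapping, &file->f_ra, file,
 *			      index, last_index);
 *
 * readahead is kicked for [index, last_index] and 'next' is the
 * first page index past the requested window.
 */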
5075
5076 struct inode *btrfs_alloc_inode(struct super_block *sb)
5077 {
5078 struct btrfs_inode *ei;
5079
5080 ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
5081 if (!ei)
5082 return NULL;
5083 ei->last_trans = 0;
5084 ei->logged_trans = 0;
5085 ei->delalloc_extents = 0;
5086 ei->delalloc_reserved_extents = 0;
5087 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
5088 INIT_LIST_HEAD(&ei->i_orphan);
5089 INIT_LIST_HEAD(&ei->ordered_operations);
5090 return &ei->vfs_inode;
5091 }
5092
5093 void btrfs_destroy_inode(struct inode *inode)
5094 {
5095 struct btrfs_ordered_extent *ordered;
5096 struct btrfs_root *root = BTRFS_I(inode)->root;
5097
5098 WARN_ON(!list_empty(&inode->i_dentry));
5099 WARN_ON(inode->i_data.nrpages);
5100
5101 /*
5102 * Make sure we're properly removed from the ordered operation
5103 * lists.
5104 */
5105 smp_mb();
5106 if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
5107 spin_lock(&root->fs_info->ordered_extent_lock);
5108 list_del_init(&BTRFS_I(inode)->ordered_operations);
5109 spin_unlock(&root->fs_info->ordered_extent_lock);
5110 }
5111
5112 spin_lock(&root->list_lock);
5113 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
5114 printk(KERN_ERR "BTRFS: inode %lu still on the orphan"
5115 " list\n", inode->i_ino);
5116 dump_stack();
5117 }
5118 spin_unlock(&root->list_lock);
5119
5120 while (1) {
5121 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
5122 if (!ordered)
5123 break;
5124 else {
5125 printk(KERN_ERR "btrfs found ordered "
5126 "extent %llu %llu on inode cleanup\n",
5127 (unsigned long long)ordered->file_offset,
5128 (unsigned long long)ordered->len);
5129 btrfs_remove_ordered_extent(inode, ordered);
/* drop the ref from the lookup above and the tree's base ref */
5130 btrfs_put_ordered_extent(ordered);
5131 btrfs_put_ordered_extent(ordered);
5132 }
5133 }
5134 inode_tree_del(inode);
5135 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
5136 kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
5137 }
5138
5139 void btrfs_drop_inode(struct inode *inode)
5140 {
5141 struct btrfs_root *root = BTRFS_I(inode)->root;
5142
5143 if (inode->i_nlink > 0 && btrfs_root_refs(&root->root_item) == 0)
5144 generic_delete_inode(inode);
5145 else
5146 generic_drop_inode(inode);
5147 }
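/*
 * Explanatory note: the unusual case above is a root with zero refs,
 * e.g. a snapshot being deleted. Its inodes are evicted even while
 * they still have links, since keeping them cached would pin the
 * dead root in memory.
 */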
5148
5149 static void init_once(void *foo)
5150 {
5151 struct btrfs_inode *ei = (struct btrfs_inode *) foo;
5152
5153 inode_init_once(&ei->vfs_inode);
5154 }
5155
5156 void btrfs_destroy_cachep(void)
5157 {
5158 if (btrfs_inode_cachep)
5159 kmem_cache_destroy(btrfs_inode_cachep);
5160 if (btrfs_trans_handle_cachep)
5161 kmem_cache_destroy(btrfs_trans_handle_cachep);
5162 if (btrfs_transaction_cachep)
5163 kmem_cache_destroy(btrfs_transaction_cachep);
5164 if (btrfs_path_cachep)
5165 kmem_cache_destroy(btrfs_path_cachep);
5166 }
5167
5168 int btrfs_init_cachep(void)
5169 {
5170 btrfs_inode_cachep = kmem_cache_create("btrfs_inode_cache",
5171 sizeof(struct btrfs_inode), 0,
5172 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, init_once);
5173 if (!btrfs_inode_cachep)
5174 goto fail;
5175
5176 btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle_cache",
5177 sizeof(struct btrfs_trans_handle), 0,
5178 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5179 if (!btrfs_trans_handle_cachep)
5180 goto fail;
5181
5182 btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction_cache",
5183 sizeof(struct btrfs_transaction), 0,
5184 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5185 if (!btrfs_transaction_cachep)
5186 goto fail;
5187
5188 btrfs_path_cachep = kmem_cache_create("btrfs_path_cache",
5189 sizeof(struct btrfs_path), 0,
5190 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
5191 if (!btrfs_path_cachep)
5192 goto fail;
5193
5194 return 0;
5195 fail:
5196 btrfs_destroy_cachep();
5197 return -ENOMEM;
5198 }
5199
5200 static int btrfs_getattr(struct vfsmount *mnt,
5201 struct dentry *dentry, struct kstat *stat)
5202 {
5203 struct inode *inode = dentry->d_inode;
5204 generic_fillattr(inode, stat);
5205 stat->dev = BTRFS_I(inode)->root->anon_super.s_dev;
5206 stat->blksize = PAGE_CACHE_SIZE;
/*
* blocks are reported in 512-byte units, counting delalloc
* bytes that have not hit the disk yet
*/
5207 stat->blocks = (inode_get_bytes(inode) +
5208 BTRFS_I(inode)->delalloc_bytes) >> 9;
5209 return 0;
5210 }
5211
5212 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
5213 struct inode *new_dir, struct dentry *new_dentry)
5214 {
5215 struct btrfs_trans_handle *trans;
5216 struct btrfs_root *root = BTRFS_I(old_dir)->root;
5217 struct btrfs_root *dest = BTRFS_I(new_dir)->root;
5218 struct inode *new_inode = new_dentry->d_inode;
5219 struct inode *old_inode = old_dentry->d_inode;
5220 struct timespec ctime = CURRENT_TIME;
5221 u64 index = 0;
5222 u64 root_objectid;
5223 int ret;
5224
5225 if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
5226 return -EPERM;
5227
5228 /* we only allow rename subvolume link between subvolumes */
5229 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
5230 return -EXDEV;
5231
5232 if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
5233 (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID))
5234 return -ENOTEMPTY;
5235
5236 if (S_ISDIR(old_inode->i_mode) && new_inode &&
5237 new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
5238 return -ENOTEMPTY;
5239
5240 /*
5241 * 2 items for dir items
5242 * 1 item for orphan entry
5243 * 1 item for ref
5244 */
5245 ret = btrfs_reserve_metadata_space(root, 4);
5246 if (ret)
5247 return ret;
5248
5249 /*
5250 * we're using rename to replace one file with another.
5251 * and the replacement file is large. Start IO on it now so
5252 * we don't add too much work to the end of the transaction
5253 */
5254 if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
5255 old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
5256 filemap_flush(old_inode->i_mapping);
5257
5258 /* close the racy window with snapshot create/destroy ioctl */
5259 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5260 down_read(&root->fs_info->subvol_sem);
5261
5262 trans = btrfs_start_transaction(root, 1);
5263 btrfs_set_trans_block_group(trans, new_dir);
5264
5265 if (dest != root)
5266 btrfs_record_root_in_trans(trans, dest);
5267
5268 ret = btrfs_set_inode_index(new_dir, &index);
5269 if (ret)
5270 goto out_fail;
5271
5272 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5273 /* force full log commit if subvolume involved. */
5274 root->fs_info->last_trans_log_full_commit = trans->transid;
5275 } else {
5276 ret = btrfs_insert_inode_ref(trans, dest,
5277 new_dentry->d_name.name,
5278 new_dentry->d_name.len,
5279 old_inode->i_ino,
5280 new_dir->i_ino, index);
5281 if (ret)
5282 goto out_fail;
5283 /*
5284 * this is an ugly little race, but the rename is required
5285 * to make sure that if we crash, the inode is either at the
5286 * old name or the new one. pinning the log transaction lets
5287 * us make sure we don't allow a log commit to come in after
5288 * we unlink the name but before we add the new name back in.
5289 */
5290 btrfs_pin_log_trans(root);
5291 }
5292 /*
5293 * make sure the inode gets flushed if it is replacing
5294 * something.
5295 */
5296 if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) {
5298 btrfs_add_ordered_operation(trans, root, old_inode);
5299 }
5300
5301 old_dir->i_ctime = old_dir->i_mtime = ctime;
5302 new_dir->i_ctime = new_dir->i_mtime = ctime;
5303 old_inode->i_ctime = ctime;
5304
5305 if (old_dentry->d_parent != new_dentry->d_parent)
5306 btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
5307
5308 if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) {
5309 root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
5310 ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid,
5311 old_dentry->d_name.name,
5312 old_dentry->d_name.len);
5313 } else {
5314 btrfs_inc_nlink(old_dentry->d_inode);
5315 ret = btrfs_unlink_inode(trans, root, old_dir,
5316 old_dentry->d_inode,
5317 old_dentry->d_name.name,
5318 old_dentry->d_name.len);
5319 }
5320 BUG_ON(ret);
5321
5322 if (new_inode) {
5323 new_inode->i_ctime = CURRENT_TIME;
5324 if (unlikely(new_inode->i_ino ==
5325 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
5326 root_objectid = BTRFS_I(new_inode)->location.objectid;
5327 ret = btrfs_unlink_subvol(trans, dest, new_dir,
5328 root_objectid,
5329 new_dentry->d_name.name,
5330 new_dentry->d_name.len);
5331 BUG_ON(new_inode->i_nlink == 0);
5332 } else {
5333 ret = btrfs_unlink_inode(trans, dest, new_dir,
5334 new_dentry->d_inode,
5335 new_dentry->d_name.name,
5336 new_dentry->d_name.len);
5337 }
5338 BUG_ON(ret);
5339 if (new_inode->i_nlink == 0) {
5340 ret = btrfs_orphan_add(trans, new_dentry->d_inode);
5341 BUG_ON(ret);
5342 }
5343 }
5344
5345 ret = btrfs_add_link(trans, new_dir, old_inode,
5346 new_dentry->d_name.name,
5347 new_dentry->d_name.len, 0, index);
5348 BUG_ON(ret);
5349
5350 if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
5351 btrfs_log_new_name(trans, old_inode, old_dir,
5352 new_dentry->d_parent);
5353 btrfs_end_log_trans(root);
5354 }
5355 out_fail:
5356 btrfs_end_transaction_throttle(trans, root);
5357
5358 if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)
5359 up_read(&root->fs_info->subvol_sem);
5360
5361 btrfs_unreserve_metadata_space(root, 4);
5362 return ret;
5363 }
5364
5365 /*
5366 * some fairly slow code that needs optimization. This walks the list
5367 * of all the inodes with pending delalloc and forces them to disk.
5368 */
5369 int btrfs_start_delalloc_inodes(struct btrfs_root *root)
5370 {
5371 struct list_head *head = &root->fs_info->delalloc_inodes;
5372 struct btrfs_inode *binode;
5373 struct inode *inode;
5374
5375 if (root->fs_info->sb->s_flags & MS_RDONLY)
5376 return -EROFS;
5377
5378 spin_lock(&root->fs_info->delalloc_lock);
5379 while (!list_empty(head)) {
5380 binode = list_entry(head->next, struct btrfs_inode,
5381 delalloc_inodes);
5382 inode = igrab(&binode->vfs_inode);
5383 if (!inode)
5384 list_del_init(&binode->delalloc_inodes);
5385 spin_unlock(&root->fs_info->delalloc_lock);
5386 if (inode) {
5387 filemap_flush(inode->i_mapping);
5388 iput(inode);
5389 }
5390 cond_resched();
5391 spin_lock(&root->fs_info->delalloc_lock);
5392 }
5393 spin_unlock(&root->fs_info->delalloc_lock);
5394
5395 /* the filemap_flush will queue IO into the worker threads, but
5396 * we have to make sure the IO is actually started and that
5397 * ordered extents get created before we return
5398 */
5399 atomic_inc(&root->fs_info->async_submit_draining);
5400 while (atomic_read(&root->fs_info->nr_async_submits) ||
5401 atomic_read(&root->fs_info->async_delalloc_pages)) {
5402 wait_event(root->fs_info->async_submit_wait,
5403 (atomic_read(&root->fs_info->nr_async_submits) == 0 &&
5404 atomic_read(&root->fs_info->async_delalloc_pages) == 0));
5405 }
5406 atomic_dec(&root->fs_info->async_submit_draining);
5407 return 0;
5408 }
5409
5410 static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
5411 const char *symname)
5412 {
5413 struct btrfs_trans_handle *trans;
5414 struct btrfs_root *root = BTRFS_I(dir)->root;
5415 struct btrfs_path *path;
5416 struct btrfs_key key;
5417 struct inode *inode = NULL;
5418 int err;
5419 int drop_inode = 0;
5420 u64 objectid;
5421 u64 index = 0;
5422 int name_len;
5423 int datasize;
5424 unsigned long ptr;
5425 struct btrfs_file_extent_item *ei;
5426 struct extent_buffer *leaf;
5427 unsigned long nr = 0;
5428
5429 name_len = strlen(symname) + 1; /* stored including the trailing NUL */
5430 if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root))
5431 return -ENAMETOOLONG;
5432
5433 /*
5434 * 2 items for inode item and ref
5435 * 2 items for dir items
5436 * 1 item for xattr if selinux is on
5437 */
5438 err = btrfs_reserve_metadata_space(root, 5);
5439 if (err)
5440 return err;
5441
5442 trans = btrfs_start_transaction(root, 1);
if (!trans) {
err = -ENOMEM;
goto out_fail;
}
5445 btrfs_set_trans_block_group(trans, dir);
5446
5447 err = btrfs_find_free_objectid(trans, root, dir->i_ino, &objectid);
5448 if (err) {
5449 err = -ENOSPC;
5450 goto out_unlock;
5451 }
5452
5453 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
5454 dentry->d_name.len,
5455 dentry->d_parent->d_inode->i_ino, objectid,
5456 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
5457 &index);
5458 err = PTR_ERR(inode);
5459 if (IS_ERR(inode))
5460 goto out_unlock;
5461
5462 err = btrfs_init_inode_security(inode, dir);
5463 if (err) {
5464 drop_inode = 1;
5465 goto out_unlock;
5466 }
5467
5468 btrfs_set_trans_block_group(trans, inode);
5469 err = btrfs_add_nondir(trans, dentry, inode, 0, index);
5470 if (err)
5471 drop_inode = 1;
5472 else {
5473 inode->i_mapping->a_ops = &btrfs_aops;
5474 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5475 inode->i_fop = &btrfs_file_operations;
5476 inode->i_op = &btrfs_file_inode_operations;
5477 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
5478 }
5479 btrfs_update_inode_block_group(trans, inode);
5480 btrfs_update_inode_block_group(trans, dir);
5481 if (drop_inode)
5482 goto out_unlock;
5483
5484 path = btrfs_alloc_path();
5485 BUG_ON(!path);
5486 key.objectid = inode->i_ino;
5487 key.offset = 0;
5488 btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY);
5489 datasize = btrfs_file_extent_calc_inline_size(name_len);
5490 err = btrfs_insert_empty_item(trans, root, path, &key,
5491 datasize);
5492 if (err) {
5493 drop_inode = 1;
5494 goto out_unlock;
5495 }
5496 leaf = path->nodes[0];
5497 ei = btrfs_item_ptr(leaf, path->slots[0],
5498 struct btrfs_file_extent_item);
5499 btrfs_set_file_extent_generation(leaf, ei, trans->transid);
5500 btrfs_set_file_extent_type(leaf, ei,
5501 BTRFS_FILE_EXTENT_INLINE);
5502 btrfs_set_file_extent_encryption(leaf, ei, 0);
5503 btrfs_set_file_extent_compression(leaf, ei, 0);
5504 btrfs_set_file_extent_other_encoding(leaf, ei, 0);
5505 btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);
5506
5507 ptr = btrfs_file_extent_inline_start(ei);
5508 write_extent_buffer(leaf, symname, ptr, name_len);
5509 btrfs_mark_buffer_dirty(leaf);
5510 btrfs_free_path(path);
5511
5512 inode->i_op = &btrfs_symlink_inode_operations;
5513 inode->i_mapping->a_ops = &btrfs_symlink_aops;
5514 inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
5515 inode_set_bytes(inode, name_len);
5516 btrfs_i_size_write(inode, name_len - 1);
5517 err = btrfs_update_inode(trans, root, inode);
5518 if (err)
5519 drop_inode = 1;
5520
5521 out_unlock:
5522 nr = trans->blocks_used;
5523 btrfs_end_transaction_throttle(trans, root);
5524 out_fail:
5525 btrfs_unreserve_metadata_space(root, 5);
5526 if (drop_inode) {
5527 inode_dec_link_count(inode);
5528 iput(inode);
5529 }
5530 btrfs_btree_balance_dirty(root, nr);
5531 return err;
5532 }
5533
5534 static int prealloc_file_range(struct btrfs_trans_handle *trans,
5535 struct inode *inode, u64 start, u64 end,
5536 u64 locked_end, u64 alloc_hint, int mode)
5537 {
5538 struct btrfs_root *root = BTRFS_I(inode)->root;
5539 struct btrfs_key ins;
5540 u64 alloc_size;
5541 u64 cur_offset = start;
5542 u64 num_bytes = end - start;
5543 int ret = 0;
5544
5545 while (num_bytes > 0) {
5546 alloc_size = min(num_bytes, root->fs_info->max_extent);
5547
5548 ret = btrfs_reserve_metadata_space(root, 1);
5549 if (ret)
5550 goto out;
5551
5552 ret = btrfs_reserve_extent(trans, root, alloc_size,
5553 root->sectorsize, 0, alloc_hint,
5554 (u64)-1, &ins, 1);
5555 if (ret) {
5556 WARN_ON(1);
5557 goto out;
5558 }
5559 ret = insert_reserved_file_extent(trans, inode,
5560 cur_offset, ins.objectid,
5561 ins.offset, ins.offset,
5562 ins.offset, locked_end,
5563 0, 0, 0,
5564 BTRFS_FILE_EXTENT_PREALLOC);
5565 BUG_ON(ret);
5566 btrfs_drop_extent_cache(inode, cur_offset,
5567 cur_offset + ins.offset - 1, 0);
5568 num_bytes -= ins.offset;
5569 cur_offset += ins.offset;
5570 alloc_hint = ins.objectid + ins.offset;
5571 btrfs_unreserve_metadata_space(root, 1);
5572 }
5573 out:
5574 if (cur_offset > start) {
5575 inode->i_ctime = CURRENT_TIME;
5576 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
5577 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
5578 cur_offset > i_size_read(inode))
5579 btrfs_i_size_write(inode, cur_offset);
5580 ret = btrfs_update_inode(trans, root, inode);
5581 BUG_ON(ret);
5582 }
5583
5584 return ret;
5585 }
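/*
 * Worked example (illustrative, assuming every reservation is
 * satisfied in full): preallocating 48M with max_extent == 16M takes
 * three trips through the loop, each reserving a 16M extent;
 * cur_offset advances by ins.offset after every insert and
 * alloc_hint chases the end of the previous allocation so the
 * extents tend to land contiguously on disk.
 */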
5586
5587 static long btrfs_fallocate(struct inode *inode, int mode,
5588 loff_t offset, loff_t len)
5589 {
5590 u64 cur_offset;
5591 u64 last_byte;
5592 u64 alloc_start;
5593 u64 alloc_end;
5594 u64 alloc_hint = 0;
5595 u64 locked_end;
5596 u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
5597 struct extent_map *em;
5598 struct btrfs_trans_handle *trans;
5599 struct btrfs_root *root;
5600 int ret;
5601
5602 alloc_start = offset & ~mask;
5603 alloc_end = (offset + len + mask) & ~mask;
5604
5605 /*
5606 * wait for ordered IO before we have any locks. We'll loop again
5607 * below with the locks held.
5608 */
5609 btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
5610
5611 mutex_lock(&inode->i_mutex);
5612 if (alloc_start > inode->i_size) {
5613 ret = btrfs_cont_expand(inode, alloc_start);
5614 if (ret)
5615 goto out;
5616 }
5617
5618 root = BTRFS_I(inode)->root;
5619
5620 ret = btrfs_check_data_free_space(root, inode,
5621 alloc_end - alloc_start);
5622 if (ret)
5623 goto out;
5624
5625 locked_end = alloc_end - 1;
5626 while (1) {
5627 struct btrfs_ordered_extent *ordered;
5628
5629 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
5630 if (!trans) {
5631 ret = -EIO;
5632 goto out_free;
5633 }
5634
5635 /* the extent lock is ordered inside the running
5636 * transaction
5637 */
5638 lock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5639 GFP_NOFS);
5640 ordered = btrfs_lookup_first_ordered_extent(inode,
5641 alloc_end - 1);
5642 if (ordered &&
5643 ordered->file_offset + ordered->len > alloc_start &&
5644 ordered->file_offset < alloc_end) {
5645 btrfs_put_ordered_extent(ordered);
5646 unlock_extent(&BTRFS_I(inode)->io_tree,
5647 alloc_start, locked_end, GFP_NOFS);
5648 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5649
5650 /*
5651 * we can't wait on the range with the transaction
5652 * running or with the extent lock held
5653 */
5654 btrfs_wait_ordered_range(inode, alloc_start,
5655 alloc_end - alloc_start);
5656 } else {
5657 if (ordered)
5658 btrfs_put_ordered_extent(ordered);
5659 break;
5660 }
5661 }
5662
5663 cur_offset = alloc_start;
5664 while (1) {
5665 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
5666 alloc_end - cur_offset, 0);
5667 BUG_ON(IS_ERR(em) || !em);
5668 last_byte = min(extent_map_end(em), alloc_end);
5669 last_byte = (last_byte + mask) & ~mask;
5670 if (em->block_start == EXTENT_MAP_HOLE) {
5671 ret = prealloc_file_range(trans, inode, cur_offset,
5672 last_byte, locked_end + 1,
5673 alloc_hint, mode);
5674 if (ret < 0) {
5675 free_extent_map(em);
5676 break;
5677 }
5678 }
5679 if (em->block_start <= EXTENT_MAP_LAST_BYTE)
5680 alloc_hint = em->block_start;
5681 free_extent_map(em);
5682
5683 cur_offset = last_byte;
5684 if (cur_offset >= alloc_end) {
5685 ret = 0;
5686 break;
5687 }
5688 }
5689 unlock_extent(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
5690 GFP_NOFS);
5691
5692 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
5693 out_free:
5694 btrfs_free_reserved_data_space(root, inode, alloc_end - alloc_start);
5695 out:
5696 mutex_unlock(&inode->i_mutex);
5697 return ret;
5698 }
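/*
 * Editorial note (not part of the original file): a minimal userspace
 * sketch of how btrfs_fallocate() is reached, assuming a file descriptor
 * on a btrfs mount and a glibc that exposes fallocate(2):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	// preallocate 3000 bytes at offset 1000 without changing i_size
 *	int ret = fallocate(fd, FALLOC_FL_KEEP_SIZE, 1000, 3000);
 *
 * With a 4096 byte sectorsize the request is rounded out to the aligned
 * range [0, 4096): alloc_start = 1000 & ~4095 = 0 and
 * alloc_end = (1000 + 3000 + 4095) & ~4095 = 4096.  Because
 * FALLOC_FL_KEEP_SIZE is set, prealloc_file_range() leaves i_size alone
 * even when the rounded range extends past EOF.
 */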
5699
5700 static int btrfs_set_page_dirty(struct page *page)
5701 {
5702 return __set_page_dirty_nobuffers(page);
5703 }
5704
5705 static int btrfs_permission(struct inode *inode, int mask)
5706 {
5707 if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
5708 return -EACCES;
5709 return generic_permission(inode, mask, btrfs_check_acl);
5710 }
5711
5712 static struct inode_operations btrfs_dir_inode_operations = {
5713 .getattr = btrfs_getattr,
5714 .lookup = btrfs_lookup,
5715 .create = btrfs_create,
5716 .unlink = btrfs_unlink,
5717 .link = btrfs_link,
5718 .mkdir = btrfs_mkdir,
5719 .rmdir = btrfs_rmdir,
5720 .rename = btrfs_rename,
5721 .symlink = btrfs_symlink,
5722 .setattr = btrfs_setattr,
5723 .mknod = btrfs_mknod,
5724 .setxattr = btrfs_setxattr,
5725 .getxattr = btrfs_getxattr,
5726 .listxattr = btrfs_listxattr,
5727 .removexattr = btrfs_removexattr,
5728 .permission = btrfs_permission,
5729 };
5730 static struct inode_operations btrfs_dir_ro_inode_operations = {
5731 .lookup = btrfs_lookup,
5732 .permission = btrfs_permission,
5733 };
5734
5735 static struct file_operations btrfs_dir_file_operations = {
5736 .llseek = generic_file_llseek,
5737 .read = generic_read_dir,
5738 .readdir = btrfs_real_readdir,
5739 .unlocked_ioctl = btrfs_ioctl,
5740 #ifdef CONFIG_COMPAT
5741 .compat_ioctl = btrfs_ioctl,
5742 #endif
5743 .release = btrfs_release_file,
5744 .fsync = btrfs_sync_file,
5745 };
5746
5747 static struct extent_io_ops btrfs_extent_io_ops = {
5748 .fill_delalloc = run_delalloc_range,
5749 .submit_bio_hook = btrfs_submit_bio_hook,
5750 .merge_bio_hook = btrfs_merge_bio_hook,
5751 .readpage_end_io_hook = btrfs_readpage_end_io_hook,
5752 .writepage_end_io_hook = btrfs_writepage_end_io_hook,
5753 .writepage_start_hook = btrfs_writepage_start_hook,
5754 .readpage_io_failed_hook = btrfs_io_failed_hook,
5755 .set_bit_hook = btrfs_set_bit_hook,
5756 .clear_bit_hook = btrfs_clear_bit_hook,
5757 .merge_extent_hook = btrfs_merge_extent_hook,
5758 .split_extent_hook = btrfs_split_extent_hook,
5759 };
5760
5761 /*
5762 * btrfs doesn't support the bmap operation because swapfiles
5763 * use bmap to make a mapping of extents in the file. They assume
5764 * these extents won't change over the life of the file and they
5765 * use the bmap result to do IO directly to the drive.
5766 *
5767 * the btrfs bmap call would return logical addresses that aren't
5768 * suitable for IO, and they will also change frequently as COW
5769 * operations happen. So, swapfile + btrfs == corruption.
5770 *
5771 * For now we're avoiding this by dropping bmap.
5772 */
5773 static struct address_space_operations btrfs_aops = {
5774 .readpage = btrfs_readpage,
5775 .writepage = btrfs_writepage,
5776 .writepages = btrfs_writepages,
5777 .readpages = btrfs_readpages,
5778 .sync_page = block_sync_page,
5779 .direct_IO = btrfs_direct_IO,
5780 .invalidatepage = btrfs_invalidatepage,
5781 .releasepage = btrfs_releasepage,
5782 .set_page_dirty = btrfs_set_page_dirty,
5783 };
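/*
 * Editorial note (not part of the original file): because .bmap is left
 * out of btrfs_aops above, the VFS refuses the FIBMAP ioctl that swapon
 * and some boot loaders use to learn a file's on-disk blocks.  A minimal
 * userspace sketch, assuming an fd on btrfs and CAP_SYS_RAWIO:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	int block = 0;				// logical block 0 of the file
 *	int ret = ioctl(fd, FIBMAP, &block);	// expected to fail (-EINVAL here,
 *						// since a_ops->bmap is NULL)
 *
 * The same missing hook is what keeps swapfiles from being activated on
 * btrfs, which is the corruption case the comment above guards against.
 */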
5784
5785 static struct address_space_operations btrfs_symlink_aops = {
5786 .readpage = btrfs_readpage,
5787 .writepage = btrfs_writepage,
5788 .invalidatepage = btrfs_invalidatepage,
5789 .releasepage = btrfs_releasepage,
5790 };
5791
5792 static struct inode_operations btrfs_file_inode_operations = {
5793 .truncate = btrfs_truncate,
5794 .getattr = btrfs_getattr,
5795 .setattr = btrfs_setattr,
5796 .setxattr = btrfs_setxattr,
5797 .getxattr = btrfs_getxattr,
5798 .listxattr = btrfs_listxattr,
5799 .removexattr = btrfs_removexattr,
5800 .permission = btrfs_permission,
5801 .fallocate = btrfs_fallocate,
5802 .fiemap = btrfs_fiemap,
5803 };
5804 static struct inode_operations btrfs_special_inode_operations = {
5805 .getattr = btrfs_getattr,
5806 .setattr = btrfs_setattr,
5807 .permission = btrfs_permission,
5808 .setxattr = btrfs_setxattr,
5809 .getxattr = btrfs_getxattr,
5810 .listxattr = btrfs_listxattr,
5811 .removexattr = btrfs_removexattr,
5812 };
5813 static struct inode_operations btrfs_symlink_inode_operations = {
5814 .readlink = generic_readlink,
5815 .follow_link = page_follow_link_light,
5816 .put_link = page_put_link,
5817 .permission = btrfs_permission,
5818 .setxattr = btrfs_setxattr,
5819 .getxattr = btrfs_getxattr,
5820 .listxattr = btrfs_listxattr,
5821 .removexattr = btrfs_removexattr,
5822 };
5823
5824 struct dentry_operations btrfs_dentry_operations = {
5825 .d_delete = btrfs_dentry_delete,
5826 };