/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
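
/*
 * Editor's sketch (not part of the kernel build): a standalone userspace
 * illustration of the two-level ordering above.  Defrag records sort by
 * root objectid first and by inode number second, so an rbtree walk visits
 * all records of one root together, in ascending ino order.  The demo_*
 * names and main() below are illustrative, not kernel API.
 */
#if 0 /* compile separately as plain C */
#include <stdio.h>

struct demo_key { unsigned long long root, ino; };

static int demo_compare(const struct demo_key *a, const struct demo_key *b)
{
	if (a->root > b->root)
		return 1;
	if (a->root < b->root)
		return -1;
	if (a->ino > b->ino)
		return 1;
	if (a->ino < b->ino)
		return -1;
	return 0;
}

int main(void)
{
	struct demo_key a = { .root = 5, .ino = 260 };
	struct demo_key b = { .root = 7, .ino = 256 };

	/* root dominates: (5, 260) sorts before (7, 256) */
	printf("%d\n", demo_compare(&a, &b)); /* prints -1 */
	return 0;
}
#endif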
/* pop a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return 0;
}
static inline int __need_auto_defrag(struct btrfs_root *root)
{
	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	return 1;
}
/*
 * insert a defrag record for this inode if auto defrag is
 * turned on
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(root))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
		/*
		 * If we set IN_DEFRAG flag and evict the inode from memory,
		 * and then re-read this inode, this new inode doesn't have
		 * IN_DEFRAG flag.  In that case, we may find an existing
		 * defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}
/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
void btrfs_requeue_inode_defrag(struct inode *inode,
				struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (!__need_auto_defrag(root))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * them together.
	 */
	spin_lock(&root->fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
/*
 * pick the defraggable inode that we want; if it doesn't exist, we will get
 * the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		if (need_resched()) {
			spin_unlock(&fs_info->defrag_inodes_lock);
			cond_resched();
			spin_lock(&fs_info->defrag_inodes_lock);
		}

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}
#define BTRFS_DEFRAG_BATCH	1024
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;

	/* get the inode */
	key.objectid = defrag->root;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	key.offset = (u64)-1;
	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
		return PTR_ERR(inode_root);
	}

	key.objectid = defrag->ino;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
		return PTR_ERR(inode);
	}

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
}
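
/*
 * Editor's sketch (not part of the kernel build): the requeue policy above
 * as a tiny state machine.  A full batch implies more work remains; a short
 * batch that began past offset zero wraps around exactly once (cycled) so
 * the head of the file gets a pass too.  The demo_* names and the constant
 * below are illustrative, not kernel API.
 */
#if 0 /* compile separately as plain C */
#include <stdio.h>

#define DEMO_BATCH 1024

struct demo_defrag { unsigned long long last_offset; int cycled; };

/* returns 1 if the record should be requeued, 0 if it is finished */
static int demo_requeue(struct demo_defrag *d, int num_defragged,
			unsigned long long next_offset)
{
	if (num_defragged == DEMO_BATCH) {
		d->last_offset = next_offset;	/* resume where we stopped */
		return 1;
	}
	if (d->last_offset && !d->cycled) {
		d->last_offset = 0;		/* wrap to file start, once */
		d->cycled = 1;
		return 1;
	}
	return 0;				/* done, free the record */
}

int main(void)
{
	struct demo_defrag d = { .last_offset = 4096, .cycled = 0 };

	printf("%d\n", demo_requeue(&d, 100, 0)); /* 1: short batch, wraps */
	printf("%d\n", demo_requeue(&d, 100, 0)); /* 0: already cycled */
	return 0;
}
#endif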
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		if (!__need_auto_defrag(fs_info->tree_root))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
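
/*
 * Editor's sketch (not part of the kernel build): the per-page split used
 * above.  Only the first page may be entered mid-page (offset = pos mod
 * page size); every later page starts at offset 0.  A 4096-byte page size
 * is assumed for the demo; the values are illustrative.
 */
#if 0 /* compile separately as plain C */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096ULL

int main(void)
{
	unsigned long long pos = 5000, write_bytes = 10000;
	unsigned long long offset = pos & (DEMO_PAGE_SIZE - 1);
	int pg = 0;

	while (write_bytes > 0) {
		unsigned long long count = DEMO_PAGE_SIZE - offset;

		if (count > write_bytes)
			count = write_bytes;
		/* page 0: offset 904, 3192 bytes; then 4096; then 2712 */
		printf("page %d: offset %llu, %llu bytes\n", pg, offset, count);
		write_bytes -= count;
		offset = 0;
		pg++;
	}
	return 0;
}
#endif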
/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
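
/*
 * Editor's sketch (not part of the kernel build): the mask arithmetic above
 * rounds the write *outward* to sector boundaries — start_pos is rounded
 * down and the byte count rounded up — so the delalloc range always covers
 * whole sectors.  A 4096-byte sectorsize is assumed for the demo.
 */
#if 0 /* compile separately as plain C */
#include <stdio.h>

int main(void)
{
	unsigned long long sectorsize = 4096, pos = 6000, write_bytes = 100;
	unsigned long long start_pos, num_bytes;

	start_pos = pos & ~(sectorsize - 1);			    /* 4096 */
	num_bytes = (write_bytes + pos - start_pos + sectorsize - 1) &
		    ~(sectorsize - 1);				    /* 4096 */

	/* prints: delalloc [4096, 8192) — one whole sector */
	printf("delalloc [%llu, %llu)\n", start_pos, start_pos + num_bytes);
	return 0;
}
#endif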
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);
		if (no_splits)
			goto next;

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;
			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = NULL;
		}
next:
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
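
/*
 * Editor's sketch (not part of the kernel build): the split geometry used
 * above.  Dropping [start, start + len) from a cached mapping that covers
 * [em_start, em_start + em_len) can leave a front piece, a tail piece, or
 * both; for an uncompressed extent the tail's on-disk block offset shifts
 * by `diff`, exactly as in the kernel code.  All numbers are illustrative.
 */
#if 0 /* compile separately as plain C */
#include <stdio.h>

int main(void)
{
	unsigned long long em_start = 0, em_len = 16384, block_start = 100000;
	unsigned long long start = 4096, len = 4096;

	if (start > em_start)	/* front piece survives */
		printf("front: [%llu, %llu)\n", em_start, start);

	if (em_start + em_len > start + len) {	/* tail piece survives */
		unsigned long long diff = start + len - em_start;

		/* prints: tail: [8192, 16384) at block 108192 */
		printf("tail:  [%llu, %llu) at block %llu\n",
		       start + len, em_start + em_len, block_start + diff);
	}
	return 0;
}
#endif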
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
	int found = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	btrfs_release_path(path);
	return ret;
}
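
/*
 * Editor's sketch (not part of the kernel build): a classifier for the four
 * overlap cases drawn in the comments above, between the drop range
 * [start, end) and an extent item [key_offset, extent_end).  The kernel
 * handles them in this same order: split, trim front, trim back, delete.
 * The demo_* name is illustrative, not kernel API.
 */
#if 0 /* compile separately as plain C */
#include <stdio.h>

static const char *demo_classify(unsigned long long start,
				 unsigned long long end,
				 unsigned long long key_offset,
				 unsigned long long extent_end)
{
	if (start > key_offset && end < extent_end)
		return "split extent in two"; /* drop range strictly inside */
	if (start <= key_offset && end < extent_end)
		return "trim front of extent";
	if (start > key_offset && end >= extent_end)
		return "trim back of extent";
	return "delete whole extent";	/* extent inside drop range */
}

int main(void)
{
	printf("%s\n", demo_classify(10, 20, 0, 30));  /* split */
	printf("%s\n", demo_classify(0, 20, 10, 30));  /* trim front */
	printf("%s\n", demo_classify(10, 40, 0, 30));  /* trim back */
	printf("%s\n", demo_classify(0, 40, 10, 30));  /* delete */
	return 0;
}
#endif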
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache);
	btrfs_free_path(path);
	return ret;
}
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
			   struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				  0, 0, &cached_state, GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					   write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
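
/*
 * Editor's sketch (not part of the kernel build): the page-count arithmetic
 * from the loop above.  num_pages is sized from the requested bytes plus the
 * leading in-page offset; after a short copy, dirty_pages recomputes how
 * many of those pages actually received data.  4096-byte pages are assumed;
 * the numbers are illustrative.
 */
#if 0 /* compile separately as plain C */
#include <stdio.h>

#define DEMO_PAGE_SIZE 4096ULL

int main(void)
{
	unsigned long long pos = 5000, write_bytes = 10000, copied = 3000;
	unsigned long long offset = pos & (DEMO_PAGE_SIZE - 1);
	unsigned long long num_pages, dirty_pages;

	num_pages = (write_bytes + offset + DEMO_PAGE_SIZE - 1) /
		    DEMO_PAGE_SIZE;
	dirty_pages = (copied + offset + DEMO_PAGE_SIZE - 1) / DEMO_PAGE_SIZE;

	/* prints: num_pages=3 dirty_pages=1 — 3 prepared, 1 dirtied */
	printf("num_pages=%llu dirty_pages=%llu\n", num_pages, dirty_pages);
	return 0;
}
#endif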
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	sb_start_write(inode->i_sb);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	err = file_update_time(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	sb_end_write(inode->i_sb);
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so multiple tasks can flush dirty pages
	 * concurrently, which improves performance.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	/*
	 * We flush the dirty pages again to avoid some dirty pages in the
	 * range being left.
	 */
	atomic_inc(&root->log_batch);
	btrfs_wait_ordered_range(inode, start, end - start + 1);
	atomic_inc(&root->log_batch);

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;

		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}
static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(!ret);

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
		u64 num_bytes;

		path->slots[0]++;
		key.offset = offset;
		btrfs_set_item_key_safe(trans, root, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, end - offset, 0, end - offset,
				       0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em);
			if (!ret)
				list_move(&hole_em->list,
					  &em_tree->modified_extents);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
	}

	return 0;
}
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
	u64 lockend = round_down(offset + len,
				 BTRFS_I(inode)->root->sectorsize) - 1;
	u64 cur_offset = lockstart;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	u64 drop_end;
	int ret = 0;
	int err = 0;
	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));

	btrfs_wait_ordered_range(inode, offset, len);

	mutex_lock(&inode->i_mutex);
	if (offset >= inode->i_size) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	/*
	 * Only do this if we are in the same page and we aren't doing the
	 * entire page.
	 */
	if (same_page && len < PAGE_CACHE_SIZE) {
		ret = btrfs_truncate_page(inode, offset, len, 0);
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero back part of the first page */
	ret = btrfs_truncate_page(inode, offset, 0, 0);
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero the front end of the last page */
	if (offset + len < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
		ret = btrfs_truncate_page(inode, offset + len, 0, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (lockend < lockstart) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this range
		 * and nobody raced in and read a page in this range, if we did
		 * we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->len < lockstart ||
		     ordered->file_offset > lockend)) &&
		     !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, EXTENT_UPTODATE, 0,
				     cached_state)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state, GFP_NOFS);
		btrfs_wait_ordered_range(inode, lockstart,
					 lockend - lockstart + 1);
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out_free;
	}
	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	while (cur_offset < lockend) {
		ret = __btrfs_drop_extents(trans, root, inode, path,
					   cur_offset, lockend + 1,
					   &drop_end, 1);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;

		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
		if (ret) {
			err = ret;
			break;
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root);

		trans = btrfs_start_transaction(root, 3);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
					      rsv, min_size);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	if (ret) {
		err = ret;
		goto out_trans;
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
	if (ret) {
		err = ret;
		goto out_trans;
	}

out_trans:
	if (!trans)
		goto out_free;

	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
out_free:
	btrfs_free_path(path);
	btrfs_free_block_rsv(root, rsv);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	mutex_unlock(&inode->i_mutex);
	if (ret && !err)
		err = ret;
	return err;
}
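
/*
 * Editor's sketch (not part of the kernel build): hole punching rounds the
 * byte range *inward* (round up the start, round down the end) because only
 * whole sectors can be dropped; the partial sectors at either edge are
 * zeroed in place instead.  A 4096-byte sectorsize is assumed, and a
 * resulting lockend < lockstart reproduces the "nothing to drop" early
 * return above.  The helper names are illustrative.
 */
#if 0 /* compile separately as plain C */
#include <stdio.h>

static unsigned long long demo_round_up(unsigned long long x,
					unsigned long long a)
{
	return (x + a - 1) / a * a;
}

static unsigned long long demo_round_down(unsigned long long x,
					  unsigned long long a)
{
	return x / a * a;
}

int main(void)
{
	unsigned long long sectorsize = 4096, offset = 5000, len = 20000;
	unsigned long long lockstart = demo_round_up(offset, sectorsize);
	unsigned long long lockend =
		demo_round_down(offset + len, sectorsize) - 1;

	/* prints: drop [8192, 24575] — the full sectors inside the range */
	printf("drop [%llu, %llu]\n", lockstart, lockend);
	return 0;
}
#endif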
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	struct extent_map *em;
	int blocksize = BTRFS_I(inode)->root->sectorsize;
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Make sure we have enough space before we do the
	 * allocation.
	 */
	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		return ret;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);

			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
	return ret;
}
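
/*
 * Editor's sketch (not part of the kernel build): how this entry point is
 * reached from userspace.  fallocate(2) with no flags preallocates; with
 * FALLOC_FL_PUNCH_HOLE (which the VFS requires to be combined with
 * FALLOC_FL_KEEP_SIZE) it takes the btrfs_punch_hole() path above.  The
 * file name is illustrative.
 */
#if 0 /* compile separately as plain C */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;

	/* preallocate 1 MiB at offset 0 */
	if (fallocate(fd, 0, 0, 1 << 20) < 0)
		perror("fallocate");

	/* punch a 4 KiB hole at offset 64 KiB, keeping i_size */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      64 << 10, 4 << 10) < 0)
		perror("fallocate punch");

	close(fd);
	return 0;
}
#endif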
static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent
	 * just before the position we want in case there is outstanding
	 * delalloc going on here.
	 */
	if (origin == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}

	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (origin == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (origin == SEEK_DATA) {
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				*offset = start;
				free_extent_map(em);
				break;
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}
		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, origin);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, origin);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
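
/*
 * Editor's sketch (not part of the kernel build): the userspace view of the
 * SEEK_DATA/SEEK_HOLE support implemented above — lseek(2) jumps to the next
 * data or hole boundary, returning -1 with errno ENXIO past EOF.  The file
 * name is illustrative.
 */
#if 0 /* compile separately as plain C */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("sparsefile", O_RDONLY);
	off_t data, hole;

	if (fd < 0)
		return 1;

	data = lseek(fd, 0, SEEK_DATA);	/* first data at or after 0 */
	hole = lseek(fd, 0, SEEK_HOLE);	/* first hole at or after 0 */
	printf("data at %lld, hole at %lld\n",
	       (long long)data, (long long)hole);

	close(fd);
	return 0;
}
#endif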
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};
void btrfs_auto_defrag_exit(void)
{
	if (btrfs_inode_defrag_cachep)
		kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}