/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/btrfs.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "volumes.h"
static struct kmem_cache *btrfs_inode_defrag_cachep;
/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* inode objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
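
/*
 * The defrag tree is therefore ordered by (root, ino) pairs, so all records
 * for one subvolume are adjacent in an in-order walk.  For illustration:
 * (root 5, ino 257) sorts before (root 5, ino 300), which in turn sorts
 * before (root 7, ino 256).
 */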
/* push a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static int __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			return -EEXIST;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return 0;
}
static inline int __need_auto_defrag(struct btrfs_root *root)
{
	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	return 1;
}
/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;
	int ret;

	if (!__need_auto_defrag(root))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) {
		/*
		 * If we set the IN_DEFRAG flag and evict the inode from
		 * memory, and then re-read this inode, this new inode
		 * doesn't have IN_DEFRAG set.  In that case we may find
		 * an existing defrag record in the tree.
		 */
		ret = __btrfs_add_inode_defrag(inode, defrag);
		if (ret)
			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}
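
/*
 * Note that IN_DEFRAG is only a fast-path hint to skip the tree walk; the
 * rbtree stays authoritative, which is why the flag is re-tested under
 * defrag_inodes_lock above before actually inserting.
 */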
/*
 * Requeue the defrag object. If there is a defrag object that points to
 * the same inode in the tree, we will merge them together (by
 * __btrfs_add_inode_defrag()) and free the one that we want to requeue.
 */
static void btrfs_requeue_inode_defrag(struct inode *inode,
				       struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	if (!__need_auto_defrag(root))
		goto out;

	/*
	 * Here we don't check the IN_DEFRAG flag, because we need to merge
	 * them together.
	 */
	spin_lock(&root->fs_info->defrag_inodes_lock);
	ret = __btrfs_add_inode_defrag(inode, defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	if (ret)
		goto out;
	return;
out:
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
}
/*
 * pick the defraggable inode that we want; if it doesn't exist, we will
 * get the next one.
 */
static struct inode_defrag *
btrfs_pick_defrag_inode(struct btrfs_fs_info *fs_info, u64 root, u64 ino)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	spin_lock(&fs_info->defrag_inodes_lock);
	p = fs_info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			goto out;
	}

	if (parent && __compare_inode_defrag(&tmp, entry) > 0) {
		parent = rb_next(parent);
		if (parent)
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		else
			entry = NULL;
	}
out:
	if (entry)
		rb_erase(parent, &fs_info->defrag_inodes);
	spin_unlock(&fs_info->defrag_inodes_lock);
	return entry;
}
void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct rb_node *node;

	spin_lock(&fs_info->defrag_inodes_lock);
	node = rb_first(&fs_info->defrag_inodes);
	while (node) {
		rb_erase(node, &fs_info->defrag_inodes);
		defrag = rb_entry(node, struct inode_defrag, rb_node);
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);

		if (need_resched()) {
			spin_unlock(&fs_info->defrag_inodes_lock);
			cond_resched();
			spin_lock(&fs_info->defrag_inodes_lock);
		}

		node = rb_first(&fs_info->defrag_inodes);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);
}
#define BTRFS_DEFRAG_BATCH	1024
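
/*
 * BTRFS_DEFRAG_BATCH bounds how much of one file is defragged per pass.
 * For illustration, with 4KiB pages a 1GiB file spans 262144 pages, so in
 * the worst case it takes 262144 / 1024 = 256 requeue rounds to cover the
 * whole file, keeping any single inode from monopolizing the defragger.
 */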
static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	int num_defrag;
	int index;
	int ret;

	/* get the inode */
	key.objectid = defrag->root;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(inode_root)) {
		ret = PTR_ERR(inode_root);
		goto cleanup;
	}

	key.objectid = defrag->ino;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		goto cleanup;
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* do a chunk of defrag */
	clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;
	range.start = defrag->last_offset;

	sb_start_write(fs_info->sb);
	num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
				       BTRFS_DEFRAG_BATCH);
	sb_end_write(fs_info->sb);
	/*
	 * if we filled the whole defrag batch, there
	 * must be more work to do.  Queue this defrag
	 * again
	 */
	if (num_defrag == BTRFS_DEFRAG_BATCH) {
		defrag->last_offset = range.start;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else if (defrag->last_offset && !defrag->cycled) {
		/*
		 * we didn't fill our defrag batch, but
		 * we didn't start at zero.  Make sure we loop
		 * around to the start of the file.
		 */
		defrag->last_offset = 0;
		defrag->cycled = 1;
		btrfs_requeue_inode_defrag(inode, defrag);
	} else {
		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	}

	iput(inode);
	return 0;
cleanup:
	srcu_read_unlock(&fs_info->subvol_srcu, index);
	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
	return ret;
}
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	u64 first_ino = 0;
	u64 root_objectid = 0;

	atomic_inc(&fs_info->defrag_running);
	while (1) {
		/* Pause the auto defragger. */
		if (test_bit(BTRFS_FS_STATE_REMOUNTING,
			     &fs_info->fs_state))
			break;

		if (!__need_auto_defrag(fs_info->tree_root))
			break;

		/* find an inode to defrag */
		defrag = btrfs_pick_defrag_inode(fs_info, root_objectid,
						 first_ino);
		if (!defrag) {
			if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;

		__btrfs_run_defrag_inode(fs_info, defrag);
	}
	atomic_dec(&fs_info->defrag_running);

	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
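
/*
 * The short-copy handling above is deliberately conservative: zeroing
 * 'copied' on a partial copy into a non-uptodate page makes the caller
 * fall back to faulting in and copying one page at a time, instead of
 * having to track partially up-to-date pages.
 */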
/*
 * unlocks pages after btrfs_file_write is done with them
 */
static void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through
		 * btrfs_set_page_dirty.  clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = ALIGN(write_bytes + pos - start_pos, root->sectorsize);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
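
/*
 * Worked example of the alignment above (assuming a 4096-byte sectorsize):
 * a 100-byte write at pos 4000 gives start_pos = 0 and num_bytes =
 * ALIGN(100 + 4000, 4096) = 8192, so both sectors touched by the write
 * become delalloc, not just the bytes actually written.
 */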
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;
	bool modified;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		int no_splits = 0;

		modified = false;
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			no_splits = 1;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &flags);
		modified = !list_empty(&em->list);
		remove_extent_mapping(em_tree, em);
		if (no_splits)
			goto next;

		if (em->start < start) {
			split->start = em->start;
			split->len = start - em->start;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_start = em->orig_start;
				split->block_start = em->block_start;

				if (compressed)
					split->block_len = em->block_len;
				else
					split->block_len = split->len;
				split->orig_block_len = max(split->block_len,
						em->orig_block_len);
				split->ram_bytes = em->ram_bytes;
			} else {
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
				split->ram_bytes = split->len;
			}

			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split, modified);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (em->block_start < EXTENT_MAP_LAST_BYTE) {
				split->orig_block_len = max(em->block_len,
						    em->orig_block_len);

				split->ram_bytes = em->ram_bytes;
				if (compressed) {
					split->block_len = em->block_len;
					split->block_start = em->block_start;
					split->orig_start = em->orig_start;
				} else {
					split->block_len = split->len;
					split->block_start = em->block_start
							     + diff;
					split->orig_start = em->orig_start;
				}
			} else {
				split->ram_bytes = split->len;
				split->orig_start = split->start;
				split->block_len = 0;
				split->block_start = em->block_start;
				split->orig_block_len = 0;
			}

			ret = add_extent_mapping(em_tree, split, modified);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = NULL;
		}
next:
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}
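
/*
 * For illustration: dropping [4096, 8191] from a cached mapping covering
 * [0, 12287] removes the original extent_map and inserts two split maps,
 * [0, 4095] and [8192, 12287], carrying over the generation, flags and
 * compression type to both halves.
 */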
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache,
			 int replace_extent,
			 u32 extent_item_size,
			 int *key_inserted)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
	int found = 0;
	int leafs_visited = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
		leafs_visited++;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leafs_visited++;
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		/*
		 * Set path->slots[0] to first slot, so that after the delete
		 * if items are moved off from our leaf to its immediate left
		 * or right neighbor leafs, we end up with a correct and
		 * adjusted path->slots[0] for our insertion (if
		 * replace_extent != 0).
		 */
		path->slots[0] = del_slot;
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	leaf = path->nodes[0];
	/*
	 * If btrfs_del_items() was called, it might have deleted a leaf, in
	 * which case it unlocked our path, so check path->locks[0] matches a
	 * write lock.
	 */
	if (!ret && replace_extent && leafs_visited == 1 &&
	    (path->locks[0] == BTRFS_WRITE_LOCK_BLOCKING ||
	     path->locks[0] == BTRFS_WRITE_LOCK) &&
	    btrfs_leaf_free_space(root, leaf) >=
	    sizeof(struct btrfs_item) + extent_item_size) {

		key.objectid = ino;
		key.type = BTRFS_EXTENT_DATA_KEY;
		key.offset = start;
		if (!del_nr && path->slots[0] < btrfs_header_nritems(leaf)) {
			struct btrfs_key slot_key;

			btrfs_item_key_to_cpu(leaf, &slot_key, path->slots[0]);
			if (btrfs_comp_cpu_keys(&key, &slot_key) > 0)
				path->slots[0]++;
		}
		setup_items_for_insert(root, path, &key,
				       &extent_item_size,
				       extent_item_size,
				       sizeof(struct btrfs_item) +
				       extent_item_size, 1);
		*key_inserted = 1;
	}

	if (!replace_extent || !(*key_inserted))
		btrfs_release_path(path);
	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	return ret;
}
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache, 0, 0, NULL);
	btrfs_free_path(path);
	return ret;
}
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
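
/*
 * In short, two file extent items are mergeable only if both are plain
 * (uncompressed, unencrypted) REG extents referencing the same disk extent
 * at exactly the offsets a single contiguous item would have used.
 */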
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}
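
/*
 * Only a page that will be partially overwritten needs to be read first;
 * prepare_pages() below therefore brings at most the first and last pages
 * of the range uptodate, since every interior page is fully overwritten.
 */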
/*
 * this just gets pages into the page cache and locks them down.
 */
static noinline int prepare_pages(struct inode *inode, struct page **pages,
				  size_t num_pages, loff_t pos,
				  size_t write_bytes, bool force_uptodate)
{
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili;

	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}

	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
/*
 * This function locks the extent and properly waits for data=ordered
 * extents to finish before allowing the pages to be modified if needed.
 *
 * The return value:
 * 1 - the extent is locked
 * 0 - the extent is not locked, and everything is OK
 * -EAGAIN - we need to re-prepare the pages
 * any other < 0 number - something went wrong
 */
static noinline int
lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
				size_t num_pages, loff_t pos,
				u64 *lockstart, u64 *lockend,
				struct extent_state **cached_state)
{
	u64 start_pos;
	u64 last_pos;
	int i;
	int ret = 0;

	start_pos = pos & ~((u64)PAGE_CACHE_SIZE - 1);
	last_pos = start_pos + ((u64)num_pages << PAGE_CACHE_SHIFT) - 1;

	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos, 0, cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, last_pos);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset <= last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos,
					     cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			ret = btrfs_wait_ordered_range(inode, start_pos,
						last_pos - start_pos + 1);
			if (ret)
				return ret;
			else
				return -EAGAIN;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				 0, 0, cached_state, GFP_NOFS);
		*lockstart = start_pos;
		*lockend = last_pos;
		ret = 1;
	}

	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}

	return ret;
}
static noinline int check_can_nocow(struct inode *inode, loff_t pos,
				    size_t *write_bytes)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_ordered_extent *ordered;
	u64 lockstart, lockend;
	u64 num_bytes;
	int ret;

	lockstart = round_down(pos, root->sectorsize);
	lockend = lockstart + round_up(*write_bytes, root->sectorsize) - 1;

	while (1) {
		lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
		ordered = btrfs_lookup_ordered_range(inode, lockstart,
						     lockend - lockstart + 1);
		if (!ordered)
			break;
		unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	num_bytes = lockend - lockstart + 1;
	ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
	if (ret <= 0) {
		ret = 0;
	} else {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
				 NULL, GFP_NOFS);
		*write_bytes = min_t(size_t, *write_bytes, num_bytes);
	}

	unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend);

	return ret;
}
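
/*
 * A positive return here means the write can proceed without a data-space
 * reservation because it lands entirely inside existing NODATACOW or
 * preallocated extents; *write_bytes may have been trimmed down to the
 * portion that actually qualifies.
 */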
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	struct extent_state *cached_state = NULL;
	u64 release_bytes = 0;
	u64 lockstart;
	u64 lockend;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool only_release_metadata = false;
	bool force_page_uptodate = false;
	bool need_unlock;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t reserve_bytes;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
		ret = btrfs_check_data_free_space(inode, reserve_bytes);
		if (ret == -ENOSPC &&
		    (BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
					      BTRFS_INODE_PREALLOC))) {
			ret = check_can_nocow(inode, pos, &write_bytes);
			if (ret > 0) {
				only_release_metadata = true;
				/*
				 * our prealloc extent may be smaller than
				 * write_bytes, so scale down.
				 */
				num_pages = (write_bytes + offset +
					     PAGE_CACHE_SIZE - 1) >>
					     PAGE_CACHE_SHIFT;
				reserve_bytes = num_pages << PAGE_CACHE_SHIFT;
				ret = 0;
			} else {
				ret = -ENOSPC;
			}
		}

		if (ret)
			break;

		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
		if (ret) {
			if (!only_release_metadata)
				btrfs_free_reserved_data_space(inode,
							       reserve_bytes);
			break;
		}

		release_bytes = reserve_bytes;
		need_unlock = false;
again:
		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(inode, pages, num_pages,
				    pos, write_bytes,
				    force_page_uptodate);
		if (ret)
			break;

		ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages,
						      pos, &lockstart,
						      &lockend, &cached_state);
		if (ret < 0) {
			if (ret == -EAGAIN)
				goto again;
			break;
		} else if (ret > 0) {
			need_unlock = true;
			ret = 0;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess
		 * delalloc bytes we reserved.  We need to increment
		 * outstanding_extents because btrfs_delalloc_release_space
		 * will decrement it, but we still have an outstanding extent
		 * for the chunk we actually managed to copy.
		 */
		if (num_pages > dirty_pages) {
			release_bytes = (num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT;
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			if (only_release_metadata)
				btrfs_delalloc_release_metadata(inode,
								release_bytes);
			else
				btrfs_delalloc_release_space(inode,
							     release_bytes);
		}

		release_bytes = dirty_pages << PAGE_CACHE_SHIFT;
		if (copied > 0)
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
		if (need_unlock)
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     lockstart, lockend, &cached_state,
					     GFP_NOFS);
		if (ret) {
			btrfs_drop_pages(pages, num_pages);
			break;
		}

		release_bytes = 0;
		if (only_release_metadata && copied > 0) {
			u64 lockstart = round_down(pos, root->sectorsize);
			u64 lockend = lockstart +
				(dirty_pages << PAGE_CACHE_SHIFT) - 1;

			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				       lockend, EXTENT_NORESERVE, NULL,
				       NULL, GFP_NOFS);
			only_release_metadata = false;
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited(inode->i_mapping);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	if (release_bytes) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, release_bytes);
		else
			btrfs_delalloc_release_space(inode, release_bytes);
	}

	return num_written ? num_written : ret;
}
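
/*
 * Reservation bookkeeping in the loop above: release_bytes always names the
 * delalloc space still owed back.  It is trimmed after a short copy, zeroed
 * once btrfs_dirty_pages() has taken ownership, and whatever remains on an
 * error path is handed back in the cleanup at the end.
 */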
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
static void update_time_for_write(struct inode *inode)
{
	struct timespec now;

	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		inode->i_mtime = now;

	if (!timespec_equal(&inode->i_ctime, &now))
		inode->i_ctime = now;

	if (IS_I_VERSION(inode))
		inode_inc_iversion(inode);
}
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;
	bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	/*
	 * We reserve space for updating the inode when we reserve space for
	 * the extent we are going to write, so we will enospc out there.  We
	 * don't need to start yet another transaction to update the inode as
	 * we will update the inode when we finish writing whatever data we
	 * write.
	 */
	update_time_for_write(inode);

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (sync)
		atomic_inc(&BTRFS_I(inode)->sync_writers);

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 *
	 * We also have to set last_sub_trans to the current log transid,
	 * otherwise subsequent syncs to a file that's been synced in this
	 * transaction will appear to have already occurred.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	BTRFS_I(inode)->last_sub_trans = root->log_transid;
	if (num_written > 0) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}

	if (sync)
		atomic_dec(&BTRFS_I(inode)->sync_writers);
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		struct btrfs_trans_handle *trans;
		struct btrfs_root *root = BTRFS_I(inode)->root;

		/*
		 * We need to block on a committing transaction to keep us
		 * from throwing an ordered operation on to the list and
		 * causing something like sync to deadlock trying to flush
		 * out this inode.
		 */
		trans = btrfs_start_transaction(root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
		btrfs_end_transaction(trans, root);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;
	bool full_sync = 0;

	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so the dirty pages can be flushed by
	 * multiple tasks, which improves performance.  See
	 * btrfs_wait_ordered_range for an explanation of the ASYNC check.
	 */
	atomic_inc(&BTRFS_I(inode)->sync_writers);
	ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			     &BTRFS_I(inode)->runtime_flags))
		ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
	atomic_dec(&BTRFS_I(inode)->sync_writers);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	/*
	 * We flush the dirty pages again to avoid some dirty pages in the
	 * range being left behind by throttling IO.
	 */
	atomic_inc(&root->log_batch);
	full_sync = test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			     &BTRFS_I(inode)->runtime_flags);
	if (full_sync) {
		ret = btrfs_wait_ordered_range(inode, start, end - start + 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}
	atomic_inc(&root->log_batch);

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;

		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	/*
	 * We use start here because we will need to wait on the IO to
	 * complete in btrfs_sync_log, which could require joining a
	 * transaction (for example checking cross references in the nocow
	 * path).  If we use join here we could get into a situation where
	 * we're waiting on IO to happen that is blocked on a transaction
	 * trying to commit.  With start we inc the extwriter counter, so
	 * we wait for all extwriters to exit before we start blocking
	 * join'ers.  This comment is to keep somebody from thinking they
	 * are super smart and changing this to
	 * btrfs_join_transaction *cough*Josef*cough*.
	 */
	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		/* Fallthrough and commit/free transaction. */
		ret = 1;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (!ret) {
			ret = btrfs_sync_log(trans, root);
			if (!ret) {
				ret = btrfs_end_transaction(trans, root);
				goto out;
			}
		}
		if (!full_sync) {
			ret = btrfs_wait_ordered_range(inode, start,
						       end - start + 1);
			if (ret)
				goto out;
		}
		ret = btrfs_commit_transaction(trans, root);
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}
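
/*
 * The fast path above matters: when last_trans is already committed (or
 * the inode is fully represented in the log), fsync returns without
 * starting a transaction at all, so repeated fsyncs of an unmodified file
 * stay cheap.
 */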
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}
static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}
static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key key;
	int ret;

	if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
		goto out;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(!ret);

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			    end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0] + 1, offset, end)) {
		u64 num_bytes;

		path->slots[0]++;
		key.offset = offset;
		btrfs_set_item_key_safe(root, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			    offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, end - offset, 0, end - offset,
				       0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->ram_bytes = hole_em->len;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->orig_block_len = 0;
		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em, 1);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
	}

	return 0;
}
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize);
	u64 lockend = round_down(offset + len,
				 BTRFS_I(inode)->root->sectorsize) - 1;
	u64 cur_offset = lockstart;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	u64 drop_end;
	int ret = 0;
	int err = 0;
	int rsv_count;
	bool same_page = ((offset >> PAGE_CACHE_SHIFT) ==
			  ((offset + len - 1) >> PAGE_CACHE_SHIFT));
	bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);

	ret = btrfs_wait_ordered_range(inode, offset, len);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);
	/*
	 * We needn't truncate any page which is beyond the end of the file
	 * because we are sure there is no data there.
	 *
	 * Only do this if we are in the same page and we aren't doing the
	 * entire page.
	 */
	if (same_page && len < PAGE_CACHE_SIZE) {
		if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE))
			ret = btrfs_truncate_page(inode, offset, len, 0);
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero back part of the first page */
	if (offset < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
		ret = btrfs_truncate_page(inode, offset, 0, 0);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	/* zero the front end of the last page */
	if (offset + len < round_up(inode->i_size, PAGE_CACHE_SIZE)) {
		ret = btrfs_truncate_page(inode, offset + len, 0, 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (lockend < lockstart) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this
		 * range and nobody raced in and read a page in this range,
		 * if we did we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->len <= lockstart ||
		     ordered->file_offset > lockend)) &&
		     !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, EXTENT_UPTODATE, 0,
				     cached_state)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state, GFP_NOFS);
		ret = btrfs_wait_ordered_range(inode, lockstart,
					       lockend - lockstart + 1);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out_free;
	}
	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent if no_holes isn't set
	 */
	rsv_count = no_holes ? 2 : 3;
	trans = btrfs_start_transaction(root, rsv_count);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	while (cur_offset < lockend) {
		ret = __btrfs_drop_extents(trans, root, inode, path,
					   cur_offset, lockend + 1,
					   &drop_end, 1, 0, 0, NULL);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;

		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
		if (ret) {
			err = ret;
			break;
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root);

		trans = btrfs_start_transaction(root, rsv_count);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
					      rsv, min_size);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	if (ret) {
		err = ret;
		goto out_trans;
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
	if (ret) {
		err = ret;
		goto out_trans;
	}

out_trans:
	if (!trans)
		goto out_free;

	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root);
out_free:
	btrfs_free_path(path);
	btrfs_free_block_rsv(root, rsv);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	mutex_unlock(&inode->i_mutex);
	if (ret && !err)
		err = ret;
	return err;
}
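
/*
 * Hole punching is block-granular: lockstart/lockend round the request
 * inward, so e.g. punching bytes [1000, 9191] with 4096-byte sectors drops
 * only block [4096, 8191], while the partial head and tail are zeroed in
 * place via btrfs_truncate_page() above.
 */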
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	struct extent_state *cached_state = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	struct extent_map *em;
	int blocksize = BTRFS_I(inode)->root->sectorsize;
	int ret;

	alloc_start = round_down(offset, blocksize);
	alloc_end = round_up(offset + len, blocksize);

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Make sure we have enough space before we do the
	 * allocation.
	 */
	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start);
	if (ret)
		return ret;
	if (root->fs_info->quota_enabled) {
		ret = btrfs_qgroup_reserve(root, alloc_end - alloc_start);
		if (ret)
			goto out_reserve_fail;
	}

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	} else {
		/*
		 * If we are fallocating from the end of the file onward we
		 * need to zero out the end of the page if i_size lands in
		 * the middle of a page.
		 */
		ret = btrfs_truncate_page(inode, inode->i_size, 0, 0);
		if (ret)
			goto out;
	}

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	ret = btrfs_wait_ordered_range(inode, alloc_start,
				       alloc_end - alloc_start);
	if (ret)
		goto out;

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			ret = btrfs_wait_ordered_range(inode, alloc_start,
						alloc_end - alloc_start);
			if (ret)
				goto out;
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = ALIGN(last_byte, blocksize);

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode,
							cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);

			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	if (root->fs_info->quota_enabled)
		btrfs_qgroup_free(root, alloc_end - alloc_start);
out_reserve_fail:
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start);
	return ret;
}
static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 len = i_size_read(inode);
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	lockend--;
	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	while (start < inode->i_size) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}

		if (whence == SEEK_HOLE &&
		    (em->block_start == EXTENT_MAP_HOLE ||
		     test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;
		else if (whence == SEEK_DATA &&
			 (em->block_start != EXTENT_MAP_HOLE &&
			  !test_bit(EXTENT_FLAG_PREALLOC, &em->flags)))
			break;

		start = em->start + em->len;
		free_extent_map(em);
		em = NULL;
		cond_resched();
	}
	free_extent_map(em);
	if (!ret) {
		if (whence == SEEK_DATA && start >= inode->i_size)
			ret = -ENXIO;
		else
			*offset = min_t(loff_t, start, inode->i_size);
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}
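
/*
 * This walk backs lseek(fd, pos, SEEK_HOLE/SEEK_DATA): preallocated
 * extents and real holes both report as "hole" here, since neither
 * contains data that a reader could observe.
 */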
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, whence);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, whence);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};
void btrfs_auto_defrag_exit(void)
{
	if (btrfs_inode_defrag_cachep)
		kmem_cache_destroy(btrfs_inode_defrag_cachep);
}

int btrfs_auto_defrag_init(void)
{
	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
					sizeof(struct inode_defrag), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!btrfs_inode_defrag_cachep)
		return -ENOMEM;

	return 0;
}