/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;

	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};
static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}
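/*
 * Illustrative note (not in the original source): the comparator keys
 * the defrag rbtree by (root, ino), root first.  For example
 * (root=5, ino=400) sorts after (root=5, ino=300) but before
 * (root=7, ino=1).
 */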
/* pop a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
				    struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			goto exists;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return;

exists:
	kfree(defrag);
	return;
}
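/*
 * Illustrative note (not in the original source): if an entry for this
 * inode already exists with transid 100 and a new record arrives with
 * transid 90, the reinsert path above lowers the existing entry to 90,
 * so the next defrag pass still considers extents newer than the older
 * transaction.
 */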
/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;

	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		__btrfs_add_inode_defrag(inode, defrag);
	else
		kfree(defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}
/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
					     u64 root, u64 ino,
					     struct rb_node **next)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	p = info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			return entry;
	}
	if (next) {
		while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
			parent = rb_next(parent);
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		}
		*next = parent;
	}
	return NULL;
}
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	u64 first_ino = 0;
	u64 root_objectid = 0;
	int num_defrag;
	int defrag_batch = 1024;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;

	atomic_inc(&fs_info->defrag_running);
	spin_lock(&fs_info->defrag_inodes_lock);
	while (1) {
		n = NULL;

		/* find an inode to defrag */
		defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
						 first_ino, &n);
		if (!defrag) {
			if (n) {
				defrag = rb_entry(n, struct inode_defrag,
						  rb_node);
			} else if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* remove it from the rbtree */
		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;
		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

		if (btrfs_fs_closing(fs_info))
			goto next_free;

		spin_unlock(&fs_info->defrag_inodes_lock);

		/* get the inode */
		key.objectid = defrag->root;
		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
		key.offset = (u64)-1;
		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(inode_root))
			goto next;

		key.objectid = defrag->ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;

		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
		if (IS_ERR(inode))
			goto next;

		/* do a chunk of defrag */
		clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
		range.start = defrag->last_offset;
		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
					       defrag_batch);
		/*
		 * if we filled the whole defrag batch, there
		 * must be more work to do.  Queue this defrag
		 * again
		 */
		if (num_defrag == defrag_batch) {
			defrag->last_offset = range.start;
			__btrfs_add_inode_defrag(inode, defrag);
			/*
			 * we don't want to kfree defrag, we added it back to
			 * the rbtree
			 */
			defrag = NULL;
		} else if (defrag->last_offset && !defrag->cycled) {
			/*
			 * we didn't fill our defrag batch, but
			 * we didn't start at zero.  Make sure we loop
			 * around to the start of the file.
			 */
			defrag->last_offset = 0;
			defrag->cycled = 1;
			__btrfs_add_inode_defrag(inode, defrag);
			defrag = NULL;
		}

		iput(inode);
next:
		spin_lock(&fs_info->defrag_inodes_lock);
next_free:
		kfree(defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);

	atomic_dec(&fs_info->defrag_running);
	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}
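/*
 * Worked example (illustrative, not from the original source): with 4K
 * pages, pos = 6144 gives offset = 6144 & 4095 = 2048, so the first
 * iteration copies at most PAGE_CACHE_SIZE - offset = 2048 bytes into
 * prepared_pages[0]; once a page boundary is reached, later iterations
 * start at offset 0.
 */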
/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;

	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}
/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}
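/*
 * Worked example of the alignment math above (illustrative, assuming a
 * 4096-byte sectorsize): pos = 5000, write_bytes = 3000 gives
 * start_pos = 5000 & ~4095 = 4096 and
 * num_bytes = (3000 + 5000 - 4096 + 4095) & ~4095 = 4096,
 * so end_of_last_block = 4096 + 4096 - 1 = 8191.
 */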
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			    int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		BUG_ON(!split || !split2); /* -ENOMEM */

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;

			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
	return 0;
}
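/*
 * Worked example (illustrative): dropping [16K, 32K) from a cached
 * extent mapping [0, 64K) removes the original em and inserts two
 * splits: a front piece [0, 16K) and a tail piece [32K, 64K) whose
 * block_start is advanced by diff = 32K (unless compressed, where the
 * whole on-disk extent is referenced by both pieces).
 */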
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.  hint_block is filled in with a block number
 * that would be a good hint to the block allocator for this file.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
		       u64 start, u64 end, u64 *hint_byte, int drop_cache)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				*hint_byte = disk_bytenr;
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, end - key.offset);
				*hint_byte = disk_bytenr;
			}
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (disk_bytenr > 0) {
				inode_sub_bytes(inode, extent_end - start);
				*hint_byte = disk_bytenr;
			}
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
				*hint_byte = disk_bytenr;
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	btrfs_free_path(path);
	return ret;
}
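/*
 * Worked example of the four overlap cases above (illustrative):
 * given a file extent item covering [8K, 24K),
 *  - dropping [12K, 16K) hits case 1 and nets [8K, 12K) and [16K, 24K),
 *  - dropping [4K, 16K) hits case 2 and leaves [16K, 24K),
 *  - dropping [16K, 32K) hits case 3 and leaves [8K, 16K),
 *  - dropping [4K, 32K) hits case 4 and deletes the item entirely.
 */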
static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}
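/*
 * Illustrative note (not in the original source): two adjacent REG
 * items are considered mergeable only when they point into the same
 * on-disk extent (same bytenr) at offsets consistent with one original
 * extent (offset == key.offset - orig_offset), with no compression or
 * other encoding; *start/*end return the neighbour's logical range.
 */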
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	btrfs_drop_extent_cache(inode, start, end - 1, 0);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
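/*
 * Worked example (illustrative): writing [256K, 512K) into a
 * preallocated extent [0, 1M) takes the split loop above twice,
 * leaving three items backed by the same disk extent: prealloc
 * [0, 256K), a REG extent [256K, 512K), and prealloc [512K, 1M).
 */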
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}
/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				  last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				  EXTENT_DO_ACCOUNTING, 0, 0, &cached_state,
				  GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delaloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}
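/*
 * Worked example of the sizing math above (illustrative, 4K pages):
 * pos = 2048 and iov_iter_count(i) = 10000 give offset = 2048 and,
 * with nrptrs >= 3, write_bytes = 10000 and
 * num_pages = (10000 + 2048 + 4095) >> 12 = 3.
 */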
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}
static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	err = file_update_time(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}
int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}
/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	mutex_lock(&inode->i_mutex);

	/*
	 * we wait first, since the writeback may change the inode, also wait
	 * ordered range does a filemap_write_and_wait_range which is why we
	 * don't do it above like other file systems.
	 */
	btrfs_wait_ordered_range(inode, start, end);

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
};

static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	return 0;
}
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	/*
	 * Make sure we have enough space before we do the
	 * allocation.
	 */
	ret = btrfs_check_data_free_space(inode, len);
	if (ret)
		return ret;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = (last_byte + mask) & ~mask;

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, len);
	return ret;
}
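/*
 * Worked example of the alignment above (illustrative, 4K sectors so
 * mask = 4095): offset = 1000, len = 3000 gives alloc_start = 0 and
 * alloc_end = (1000 + 3000 + 4095) & ~4095 = 4096, i.e. the single
 * sector [0, 4096) is preallocated.
 */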
static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent just
	 * before the position we want in case there is outstanding delalloc
	 * going on here.
	 */
	if (origin == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}

	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (origin == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (origin == SEEK_DATA) {
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				*offset = start;
				free_extent_map(em);
				break;
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}
		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, origin);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, origin);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}
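/*
 * Usage sketch (illustrative, standard lseek(2) semantics): userspace
 * drives the SEEK_DATA/SEEK_HOLE paths above with e.g.
 *
 *	off_t hole = lseek(fd, 0, SEEK_HOLE);
 *	off_t data = lseek(fd, hole, SEEK_DATA);
 *
 * either call returns -1 with errno set to ENXIO when the starting
 * offset is at or beyond the end of the file.
 */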
const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};