/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>

#include <trace/events/f2fs.h>
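
/*
 * Page-fault handler for shared writable mappings: reserve a block for the
 * faulting page, zero any part of the page beyond EOF, and mark the page
 * up-to-date and dirty so a later writeback can place it on disk.
 */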
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vma->vm_file);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);

	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);

static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};

static int get_parent_ino(struct inode *inode, nid_t *pino)
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);

	if (update_dent_inode(inode, inode, &dentry->d_name)) {

	*pino = parent_ino(dentry);

static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}

static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))

static void try_to_fix_pino(struct inode *inode)
	struct f2fs_inode_info *fi = F2FS_I(inode);

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		file_got_pino(inode);
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);

	up_write(&fi->i_sem);
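
/*
 * fsync/fdatasync entry point: flush the file's dirty data, then either write
 * the roll-forward recovery information for the inode or fall back to a full
 * checkpoint when the inode cannot be recovered by roll-forward alone.
 */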
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);

	/* if the inode is dirty, let's recover all the time */
		f2fs_write_inode(inode, NULL);

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))

	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
	}

	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	}

	ret = wait_on_node_pages_writeback(sbi, ino);

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);

static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
	if (whence != SEEK_DATA)

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
	pagevec_release(&pvec);

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
	if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
	if (blkaddr == NULL_ADDR)
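
/*
 * SEEK_DATA/SEEK_HOLE support: walk the direct-node blocks from the given
 * offset and return the first position whose block address (or dirty page
 * cache state) matches what the caller is looking for.
 */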
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;

	isize = i_size_read(inode);

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {

	if (whence == SEEK_DATA)

	if (whence == SEEK_HOLE && data_ofs > isize)

	return vfs_setpos(file, data_ofs, maxbytes);

static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));

		return f2fs_seek_block(file, offset, whence);

static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
	struct inode *inode = file_inode(file);

	if (f2fs_encrypted_inode(inode)) {
		err = f2fs_get_encryption_info(inode);
		if (!f2fs_encrypted_inode(inode))

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);

	vma->vm_ops = &f2fs_file_vm_ops;

static int f2fs_file_open(struct inode *inode, struct file *filp)
	int ret = generic_file_open(inode, filp);

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = f2fs_get_encryption_info(inode);
		if (!f2fs_encrypted_inode(inode))
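
/*
 * Invalidate up to @count block addresses in the node page referenced by @dn,
 * starting at dn->ofs_in_node, and update the extent cache and the valid
 * block count accordingly.
 */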
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);

	/*
	 * once we invalidate valid blkaddr in range [ofs, ofs + count],
	 * we will invalidate all blkaddr in the whole range.
	 */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
	f2fs_update_extent_cache_range(dn, fofs, 0, len);
	dec_valid_block_count(sbi, dn->inode, nr_free);

	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}

static int truncate_partial_data_page(struct inode *inode, u64 from,
							bool cache_only)
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;

	if (!offset && !cache_only)

	page = f2fs_grab_cache_page(mapping, index, false);
	if (page && PageUptodate(page))
	f2fs_put_page(page, 1);

	page = get_lock_data_page(inode, index, true);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
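
/*
 * Truncate all data blocks at or beyond @from: drop inline data if present,
 * free the block addresses in the partially covered direct node, punch the
 * remaining node tree, and finally zero the tail of the last data page.
 */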
int truncate_blocks(struct inode *inode, u64 from, bool lock)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	int count = 0, err = 0;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	ipage = get_node_page(sbi, inode->i_ino);
		err = PTR_ERR(ipage);

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);

	err = truncate_inode_blocks(inode, free_from);

	/* lastly zero out the first data page */
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);

int f2fs_truncate(struct inode *inode, bool lock)
	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);

	err = truncate_blocks(inode, i_size_read(inode), lock);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);

int f2fs_getattr(struct vfsmount *mnt,
			struct dentry *dentry, struct kstat *stat)
	struct inode *inode = d_inode(dentry);
	generic_fillattr(inode, stat);

#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
#else
#define __setattr_copy setattr_copy
#endif

int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);

	err = inode_change_ok(inode, attr);

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				f2fs_get_encryption_info(inode))

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			f2fs_balance_fs(F2FS_I_SB(inode), true);

			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);

		inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);

	mark_inode_dirty(inode);

const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};

static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	f2fs_balance_fs(sbi, true);

	page = get_new_data_page(inode, NULL, index, false);
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err == -ENOENT) {

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
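
/*
 * FALLOC_FL_PUNCH_HOLE: zero the partial pages at each end of the range,
 * drop the fully covered page cache pages, and free the underlying blocks
 * with truncate_hole().
 */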
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;

	ret = f2fs_convert_inline_inode(inode);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);

		ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);

		ret = fill_zero(inode, pg_end, 0, off_end);

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,

			ret = truncate_hole(inode, pg_start, pg_end);
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool do_replace = false;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
	} else if (ret == -ENOENT) {
		new_addr = NULL_ADDR;
		new_addr = dn.data_blkaddr;
		if (!is_checkpointed_data(sbi, new_addr)) {
			dn.data_blkaddr = NULL_ADDR;
			/* do not invalidate this block address */
			set_data_blkaddr(&dn);
			f2fs_update_extent_cache(&dn);

	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

		struct page *ipage = get_node_page(sbi, inode->i_ino);
			ret = PTR_ERR(ipage);

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);

		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
							ni.version, true, false);

		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, true);
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);

	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		dn.data_blkaddr = new_addr;
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);

static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (; end < nrpages; start++, end++) {
		f2fs_balance_fs(sbi, true);
		ret = __exchange_data_block(inode, end, start, true);
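
/*
 * FALLOC_FL_COLLAPSE_RANGE: shift every block after the collapsed range
 * toward the start of the file with __exchange_data_block(), then shrink
 * i_size by the collapsed length.
 */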
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
	pgoff_t pg_start, pg_end;

	if (offset + len >= i_size_read(inode))

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);

	i_size_write(inode, new_size);
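
/*
 * FALLOC_FL_ZERO_RANGE: zero the partial pages at both ends, convert the
 * fully covered blocks to pre-allocated NEW_ADDR blocks, and extend i_size
 * when FALLOC_FL_KEEP_SIZE is not set.
 */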
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;

	ret = inode_newsize_ok(inode, (len + offset));

	ret = f2fs_convert_inline_inode(inode);

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);

		if (offset + len > new_size)
			new_size = offset + len;
		new_size = max_t(loff_t, new_size, offset + len);

		ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);

		new_size = max_t(loff_t, new_size,
				(loff_t)pg_start << PAGE_CACHE_SHIFT);

		for (index = pg_start; index < pg_end; index++) {
			struct dnode_of_data dn;

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				f2fs_unlock_op(sbi);

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, index);
				f2fs_unlock_op(sbi);

			if (dn.data_blkaddr != NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);
				dn.data_blkaddr = NEW_ADDR;
				set_data_blkaddr(&dn);
				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);

			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			new_size = max_t(loff_t, new_size,
				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);

		ret = fill_zero(inode, pg_end, 0, off_end);

		new_size = max_t(loff_t, new_size, offset + len);

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
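
/*
 * FALLOC_FL_INSERT_RANGE: make room inside the file by shifting every block
 * from the insertion point toward the end of the file (highest index first),
 * then grow i_size by the inserted length.
 */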
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)

	if (offset >= i_size_read(inode))

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))

	ret = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	ret = truncate_blocks(inode, i_size_read(inode), true);

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		ret = __exchange_data_block(inode, idx, idx + delta, false);
		f2fs_unlock_op(sbi);

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	i_size_write(inode, new_size);
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;

	ret = inode_newsize_ok(inode, (len + offset));

	ret = f2fs_convert_inline_inode(inode);

	f2fs_balance_fs(sbi, true);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		if (index == pg_end && !off_end)

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_reserve_block(&dn, index);

		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
			new_size += PAGE_CACHE_SIZE;

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);

	f2fs_unlock_op(sbi);

static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
	struct inode *inode = file_inode(file);

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);

static int f2fs_release_file(struct inode *inode, struct file *filp)
	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	return flags & F2FS_OTHER_FLMASK;

static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	unsigned int oldflags;

	ret = mnt_want_write_file(filp);

	if (!inode_owner_or_capable(inode)) {

	if (get_user(flags, (int __user *)arg)) {

	flags = f2fs_mask_flags(inode->i_mode, flags);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			inode_unlock(inode);

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	inode_unlock(inode);

	f2fs_set_inode_flags(inode);
	inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);

	mnt_drop_write_file(filp);

static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
static int f2fs_ioc_start_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))

	if (f2fs_is_atomic_file(inode))

	ret = f2fs_convert_inline_inode(inode);

	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

static int f2fs_ioc_commit_atomic_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))

	if (f2fs_is_volatile_file(inode))

	ret = mnt_want_write_file(filp);

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
		ret = commit_inmem_pages(inode);
			set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);

	ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);

	mnt_drop_write_file(filp);

static int f2fs_ioc_start_volatile_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))

	if (f2fs_is_volatile_file(inode))

	ret = f2fs_convert_inline_inode(inode);

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

static int f2fs_ioc_release_volatile_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))

	if (!f2fs_is_volatile_file(inode))

	if (!f2fs_is_first_block_written(inode))
		return truncate_partial_data_page(inode, 0, true);

	return punch_hole(inode, 0, F2FS_BLKSIZE);

static int f2fs_ioc_abort_volatile_write(struct file *filp)
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))

	ret = mnt_want_write_file(filp);

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
		ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
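
/*
 * F2FS_IOC_SHUTDOWN: stop checkpointing, with a varying amount of prior
 * flushing (freeze/thaw, sync_fs, nothing, or meta-page writeback) depending
 * on the requested go-down mode.
 */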
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(in, (__u32 __user *)arg))

	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi);
			thaw_bdev(sb->s_bdev, sb);
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi);
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi);
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX);
		f2fs_stop_checkpoint(sbi);

	f2fs_update_time(sbi, REQ_TIME);

static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;

	if (!capable(CAP_SYS_ADMIN))

	if (!blk_queue_discard(q))

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);

	if (copy_to_user((struct fstrim_range __user *)arg, &range,

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

static bool uuid_is_nonzero(__u8 u[16])
	for (i = 0; i < 16; i++)

static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	struct f2fs_encryption_policy policy;
	struct inode *inode = file_inode(filp);

	if (copy_from_user(&policy, (struct f2fs_encryption_policy __user *)arg,

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return f2fs_process_policy(&policy, inode);

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	struct f2fs_encryption_policy policy;
	struct inode *inode = file_inode(filp);

	err = f2fs_get_policy(inode, &policy);

	if (copy_to_user((struct f2fs_encryption_policy __user *)arg, &policy,

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!f2fs_sb_has_crypto(inode->i_sb))

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))

	err = mnt_want_write_file(filp);

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);

	mnt_drop_write_file(filp);

	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,

static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!capable(CAP_SYS_ADMIN))

	if (get_user(sync, (__u32 __user *)arg))

	if (f2fs_readonly(sbi->sb))

		if (!mutex_trylock(&sbi->gc_mutex))

		mutex_lock(&sbi->gc_mutex);

	return f2fs_gc(sbi, sync);

static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (!capable(CAP_SYS_ADMIN))

	if (f2fs_readonly(sbi->sb))

	return f2fs_sync_fs(sbi->sb, 1);
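
/*
 * F2FS_IOC_DEFRAGMENT helper: check whether the range's blocks are already
 * contiguous and, if not, mark its pages dirty under FI_DO_DEFRAG so that a
 * subsequent writeback reallocates them contiguously.
 */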
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei;
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
	block_t blk_end = 0;
	bool fragmented = false;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update(inode))

	pg_start = range->start >> PAGE_CACHE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT;

	f2fs_balance_fs(sbi, true);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {

		if (blk_end && blk_end != map.m_pblk) {

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;

	/*
	 * make sure there are enough free sections for LFS allocation, this
	 * can avoid defragment running in SSR mode when free sections are
	 * allocated.
	 */
	if (has_not_enough_free_secs(sbi, sec_num)) {

	while (map.m_lblk < pg_end) {

		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {

		set_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);

		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {

			page = get_lock_data_page(inode, idx, true);
				err = PTR_ERR(page);

			set_page_dirty(page);
			f2fs_put_page(page, 1);

		if (idx < pg_end && cnt < blk_per_seg)

	clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);

	err = filemap_fdatawrite(inode->i_mapping);

	clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
	inode_unlock(inode);

	range->len = (u64)total << PAGE_CACHE_SHIFT;

static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;

	if (!capable(CAP_SYS_ADMIN))

	if (!S_ISREG(inode->i_mode))

	err = mnt_want_write_file(filp);

	if (f2fs_readonly(sbi->sb)) {

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) ||
		range.len & (F2FS_BLKSIZE - 1)) {

	err = f2fs_defragment_range(sbi, filp, &range);
	f2fs_update_time(sbi, REQ_TIME);

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,

	mnt_drop_write_file(filp);

long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);

static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);

	if (f2fs_encrypted_inode(inode) &&
				!f2fs_has_encryption_key(inode) &&
				f2fs_get_encryption_info(inode))

	ret = generic_write_checks(iocb, from);
		ret = f2fs_preallocate_blocks(iocb, from);
			ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

		err = generic_write_sync(file, iocb->ki_pos - ret, ret);

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif

const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};