/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>
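
/*
 * Read completion handler: on success each page in the bio is marked
 * up-to-date and unlocked; on error it is marked !Uptodate with PG_error
 * set.  Bios carrying encrypted data are handed off to the crypto
 * context first, so decryption can finish in workqueue context before
 * the pages are released to readers.
 */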
static void f2fs_read_end_io(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	if (f2fs_bio_encrypted(bio)) {
		if (bio->bi_error) {
			f2fs_release_crypto_ctx(bio->bi_private);
		} else {
			f2fs_end_io_crypto_work(bio->bi_private, bio);
			return;
		}
	}

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!bio->bi_error) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
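
/*
 * Write completion handler: each page drops its writeback state and the
 * per-sb F2FS_WRITEBACK count.  A failed write marks the mapping with
 * AS_EIO and stops checkpointing, since on-disk consistency can no
 * longer be guaranteed.  The last completion wakes up checkpoint waiters
 * sleeping on sbi->cp_wait.
 */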
static void f2fs_write_end_io(struct bio *bio)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		f2fs_restore_and_release_control_page(&page);

		if (unlikely(bio->bi_error)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}
/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	bio = f2fs_bio_alloc(npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = is_read ? NULL : sbi;

	return bio;
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}
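
/*
 * The helpers below decide whether a pending merged bio already holds a
 * page of interest.  Callers can filter by inode, by page, or by node
 * ino; passing none of them matches any non-empty bio.
 */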
static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode,
						struct page *page, nid_t ino)
{
	struct bio_vec *bvec;
	struct page *target;
	int i;

	if (!io->bio)
		return false;

	if (!inode && !page && !ino)
		return true;

	bio_for_each_segment_all(bvec, io->bio, i) {

		if (bvec->bv_page->mapping) {
			target = bvec->bv_page;
		} else {
			struct f2fs_crypto_ctx *ctx;

			/* encrypted page */
			ctx = (struct f2fs_crypto_ctx *)page_private(
								bvec->bv_page);
			target = ctx->w.control_page;
		}

		if (inode && inode == target->mapping->host)
			return true;
		if (page && page == target)
			return true;
		if (ino && ino == ino_of_node(target))
			return true;
	}

	return false;
}
static bool has_merged_page(struct f2fs_sb_info *sbi, struct inode *inode,
						struct page *page, nid_t ino,
						enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	bool ret;

	down_read(&io->io_rwsem);
	ret = __has_merged_page(io, inode, page, ino);
	up_read(&io->io_rwsem);
	return ret;
}
static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	if (!__has_merged_page(io, inode, page, ino))
		goto out;

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
out:
	up_write(&io->io_rwsem);
}
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, enum page_type type,
									int rw)
{
	__f2fs_submit_merged_bio(sbi, NULL, NULL, 0, type, rw);
}
void f2fs_submit_merged_bio_cond(struct f2fs_sb_info *sbi,
				struct inode *inode, struct page *page,
				nid_t ino, enum page_type type, int rw)
{
	if (has_merged_page(sbi, inode, page, ino, type))
		__f2fs_submit_merged_bio(sbi, inode, page, ino, type, rw);
}
void f2fs_flush_merged_bios(struct f2fs_sb_info *sbi)
{
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	f2fs_submit_merged_bio(sbi, NODE, WRITE);
	f2fs_submit_merged_bio(sbi, META, WRITE);
}
/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_io_info *fio)
{
	struct bio *bio;
	struct page *page = fio->encrypted_page ?
			fio->encrypted_page : fio->page;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}
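
/*
 * Merged submission path: the page is appended to the per-type in-flight
 * bio when it is physically contiguous with the previously queued block
 * and uses the same rw flags; otherwise the pending bio is flushed and a
 * new one is allocated.
 */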
void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);
	struct page *bio_page;

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	if (fio->old_blkaddr != NEW_ADDR)
		verify_block_addr(sbi, fio->old_blkaddr);
	verify_block_addr(sbi, fio->new_blkaddr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->new_blkaddr,
						bio_blocks, is_read);
		io->fio = *fio;
	}

	bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;

	if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->new_blkaddr;
	f2fs_trace_ios(fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(fio->page, fio);
}
/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, true);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	if (set_page_dirty(node_page))
		dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
	dn->data_blkaddr = blkaddr;
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
}
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}
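
/*
 * f2fs_reserve_block() resolves the dnode covering @index and, if the
 * slot is a hole (NULL_ADDR), reserves a new block for it.  The dnode is
 * released here unless the caller already supplied an inode page.
 */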
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
{
	struct extent_info ei;
	struct inode *inode = dn->inode;

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn->data_blkaddr = ei.blk + index - ei.fofs;
		return 0;
	}

	return f2fs_reserve_block(dn, index);
}
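
/*
 * get_read_data_page() returns a locked page with read IO submitted when
 * needed.  The extent cache is consulted before walking the node page,
 * and encrypted regular files fall back to read_mapping_page() so the
 * ->readpage path can handle decryption.
 */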
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
						int rw, bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.type = DATA,
		.rw = rw,
		.encrypted_page = NULL,
	};

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return read_mapping_page(mapping, index, NULL);

	page = f2fs_grab_cache_page(mapping, index, for_write);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		goto put_err;
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		err = -ENOENT;
		goto put_err;
	}
got_it:
	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		unlock_page(page);
		return page;
	}

	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
	fio.page = page;
	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_err;
	return page;

put_err:
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
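
/*
 * find_data_page() is the opportunistic variant: it returns an
 * up-to-date page without keeping it locked, issuing READ_SYNC and
 * waiting for the page lock to clear only on a cache miss.
 */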
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	page = get_read_data_page(inode, index, READ_SYNC, false);
	if (IS_ERR(page))
		return page;

	if (PageUptodate(page))
		return page;

	wait_on_page_locked(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 0);
		return ERR_PTR(-EIO);
	}
	return page;
}
/*
 * If it tries to access a hole, return an error.
 * The callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index,
							bool for_write)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
repeat:
	page = get_read_data_page(inode, index, READ_SYNC, for_write);
	if (IS_ERR(page))
		return page;

	/* wait for read completion */
	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}
/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir, and if any error occurs,
 * ipage should be released by this function.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;
repeat:
	page = f2fs_grab_cache_page(mapping, index, true);
	if (!page) {
		/*
		 * before exiting, we should make sure ipage will be released
		 * if any error occurs.
		 */
		f2fs_put_page(ipage, 1);
		return ERR_PTR(-ENOMEM);
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	if (!ipage)
		f2fs_put_dnode(&dn);

	if (PageUptodate(page))
		goto got_it;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		f2fs_put_page(page, 1);

		/* if ipage exists, blkaddr should be NEW_ADDR */
		f2fs_bug_on(F2FS_I_SB(inode), ipage);
		page = get_lock_data_page(inode, index, true);
		if (IS_ERR(page))
			goto repeat;
	}
got_it:
	if (new_i_size && i_size_read(inode) <
			((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;
}
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode,
				((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT));
	return 0;
}
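
/*
 * Preallocation for write paths: direct IO converts inline data and maps
 * blocks with F2FS_GET_BLOCK_PRE_DIO, while buffered writes that outgrow
 * the inline area are mapped with F2FS_GET_BLOCK_PRE_AIO.
 */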
ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	struct f2fs_map_blocks map;
	ssize_t ret = 0;

	map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos);
	map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from));
	map.m_next_pgofs = NULL;

	if (f2fs_encrypted_inode(inode))
		return 0;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
	}
	if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}
	if (!f2fs_has_inline_data(inode))
		return f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	return ret;
}
/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
						int create, int flag)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;
	block_t blkaddr;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

next_dnode:
	if (create)
		f2fs_lock_op(sbi);

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT) {
			err = 0;
			if (map->m_next_pgofs)
				*map->m_next_pgofs =
					get_next_page_offset(&dn, pgofs);
		}
		goto unlock_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

next_block:
	blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

	if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) {
		if (create) {
			if (unlikely(f2fs_cp_error(sbi))) {
				err = -EIO;
				goto sync_out;
			}
			if (flag == F2FS_GET_BLOCK_PRE_AIO) {
				if (blkaddr == NULL_ADDR)
					err = reserve_new_block(&dn);
			} else {
				err = __allocate_data_block(&dn);
			}
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags = F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		} else {
			if (flag == F2FS_GET_BLOCK_FIEMAP &&
						blkaddr == NULL_ADDR) {
				if (map->m_next_pgofs)
					*map->m_next_pgofs = pgofs + 1;
			}
			if (flag != F2FS_GET_BLOCK_FIEMAP ||
						blkaddr != NEW_ADDR) {
				if (flag == F2FS_GET_BLOCK_BMAP)
					err = -ENOENT;
				goto sync_out;
			}
		}
	}

	if (map->m_len == 0) {
		/* preallocated unwritten block should be mapped for fiemap. */
		if (blkaddr == NEW_ADDR)
			map->m_flags |= F2FS_MAP_UNWRITTEN;
		map->m_flags |= F2FS_MAP_MAPPED;

		map->m_pblk = blkaddr;
		map->m_len = 1;
	} else if ((map->m_pblk != NEW_ADDR &&
			blkaddr == (map->m_pblk + ofs)) ||
			(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
			flag == F2FS_GET_BLOCK_PRE_DIO ||
			flag == F2FS_GET_BLOCK_PRE_AIO) {
		ofs++;
		map->m_len++;
	} else {
		goto sync_out;
	}

	dn.ofs_in_node++;
	pgofs++;

	if (map->m_len < maxblocks) {
		if (dn.ofs_in_node < end_offset)
			goto next_block;

		if (allocated)
			sync_inode_page(&dn);
		f2fs_put_dnode(&dn);

		if (create) {
			f2fs_unlock_op(sbi);
			f2fs_balance_fs(sbi, allocated);
		}
		allocated = false;
		goto next_dnode;
	}

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
unlock_out:
	if (create) {
		f2fs_unlock_op(sbi);
		f2fs_balance_fs(sbi, allocated);
	}
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}
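
/*
 * The get_data_block_* wrappers translate the buffer_head convention
 * used by generic code (fiemap, bmap, direct IO) into an
 * f2fs_map_blocks() call.
 */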
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag,
			pgoff_t *next_pgofs)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;
	map.m_next_pgofs = next_pgofs;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag,
			pgoff_t *next_pgofs)
{
	return __get_data_block(inode, iblock, bh_result, create,
							flag, next_pgofs);
}
static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO, NULL);
}
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	/* Block number less than F2FS MAX BLOCKS */
	if (unlikely(iblock >= F2FS_I_SB(inode)->max_file_blocks))
		return -EFBIG;

	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP, NULL);
}
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	pgoff_t next_pgofs;
	loff_t isize;
	u64 logical = 0, phys = 0, size = 0;
	u32 flags = 0;
	int ret = 0;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	inode_lock(inode);

	isize = i_size_read(inode);
	if (start >= isize)
		goto out;

	if (start + len > isize)
		len = isize - start;

	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);

next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP, &next_pgofs);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk = next_pgofs;
		/* Go through holes until passing the EOF */
		if (blk_to_logical(inode, start_blk) < isize)
			goto prep_next;
		/*
		 * Found a hole beyond isize means no more extents.
		 * Note that the premise is that filesystems don't
		 * punch holes beyond isize and keep size unchanged.
		 */
		flags |= FIEMAP_EXTENT_LAST;
	}

	if (size) {
		if (f2fs_encrypted_inode(inode))
			flags |= FIEMAP_EXTENT_DATA_ENCRYPTED;

		ret = fiemap_fill_next_extent(fieinfo, logical,
				phys, size, flags);
	}

	if (start_blk > last_blk || ret)
		goto out;

	logical = blk_to_logical(inode, start_blk);
	phys = blk_to_logical(inode, map_bh.b_blocknr);
	size = map_bh.b_size;
	flags = 0;
	if (buffer_unwritten(&map_bh))
		flags = FIEMAP_EXTENT_UNWRITTEN;

	start_blk += logical_to_blk(inode, size);

prep_next:
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
	map.m_next_pgofs = NULL;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait for the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}
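
/*
 * ->readpage(s) entries: inline data is served directly from the inode
 * page when possible; otherwise the request falls through to the mpage
 * reader above.
 */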
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}
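
/*
 * do_write_data_page() picks between in-place update (IPU), which
 * rewrites the existing block address, and out-of-place update (OPU),
 * which allocates a new block through the log-structured write path.
 */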
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->old_blkaddr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->old_blkaddr);

		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * in-place writes are preferred for updated data.
	 */
	if (unlikely(fio->old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(&dn, fio);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio_cond(sbi, NULL, page, 0, DATA, WRITE);
		remove_dirty_inode(inode);
	}

	f2fs_balance_fs(sbi, need_balance_fs);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_bio(sbi, DATA, WRITE);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}
/*
 * This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making the write step of cold data pages separate from
 * warm/hot data pages.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page,
								DATA, true);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
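
/*
 * ->writepages entry: cheap cases (no dirty pages, small dirty dentry
 * sets, defragmentation, recovery in progress) are skipped before the
 * two-step cache walk above is started.
 */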
static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	trace_f2fs_writepages(mapping->host, wbc, DATA);
	return 0;
}
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	loff_t i_size = i_size_read(inode);

	if (to > i_size) {
		truncate_pagecache(inode, i_size);
		truncate_blocks(inode, i_size, true);
	}
}
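
/*
 * prepare_write_begin() resolves the block address backing the page
 * about to be written, converting or filling inline data as necessary.
 * f2fs_lock_op() is taken only when allocation may be required.
 */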
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	/*
	 * we already allocated all the blocks, so we don't need to get
	 * the block addresses when there is no need to fill the page.
	 */
	if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) &&
					len == PAGE_CACHE_SIZE)
		return 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			set_inline_node(ipage);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			/* hole case */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || (!err && dn.data_blkaddr == NULL_ADDR)) {
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page = NULL;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	bool need_balance = false;
	block_t blkaddr = NULL_ADDR;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	err = prepare_write_begin(sbi, page, pos, len,
					&blkaddr, &need_balance);
	if (err)
		goto fail;

	if (need_balance && has_not_enough_free_secs(sbi, 0)) {
		unlock_page(page);
		f2fs_balance_fs(sbi, true);
		lock_page(page);
		if (page->mapping != mapping) {
			/* The page got truncated from under us */
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);

	if (len == PAGE_CACHE_SIZE)
		goto out_update;
	if (PageUptodate(page))
		goto out_clear;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out_update;
	}

	if (blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.sbi = sbi,
			.type = DATA,
			.rw = READ_SYNC,
			.old_blkaddr = blkaddr,
			.new_blkaddr = blkaddr,
			.page = page,
			.encrypted_page = NULL,
		};
		err = f2fs_submit_page_bio(&fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}

		/* avoid symlink page */
		if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
			err = f2fs_decrypt(page);
			if (err)
				goto fail;
		}
	}
out_update:
	SetPageUptodate(page);
out_clear:
	clear_cold_data(page);
	return 0;

fail:
	f2fs_put_page(page, 1);
	f2fs_write_failed(mapping, pos + len);
	return err;
}
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
	}

	f2fs_put_page(page, 1);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return copied;
}
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
			loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			loff_t offset)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	err = check_direct_IO(inode, iter, offset);
	if (err)
		return err;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}
void f2fs_invalidate_page(struct page *page, unsigned int offset,
			unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return;

	ClearPagePrivate(page);
}
int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	/* This is an atomic written page, keep Private */
	if (IS_ATOMIC_WRITTEN_PAGE(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
			register_inmem_page(inode, page);
			return 1;
		}
		/*
		 * Previously, this page has been registered, we just
		 * return here.
		 */
		return 0;
	}

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}
static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	/* make sure allocating whole blocks */
	if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		filemap_write_and_wait(mapping);

	return generic_block_bmap(mapping, block, get_data_block_bmap);
}
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};