/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	wait_on_page_writeback(node_page);

	rn = (struct f2fs_node *)page_address(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}
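
/*
 * Reserve one new data block for @dn: charge the valid-block quota via
 * inc_valid_block_count() and stamp NEW_ADDR into the node entry; the
 * real on-disk address is assigned later, at writeback time.
 */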
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}
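
/*
 * Look up @pgofs in the per-inode extent cache (a single cached extent).
 * On a hit, map @bh_result to the cached block address and return 1; on a
 * miss, return 0 so that the caller falls back to a node page lookup.
 */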
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	sbi->total_hit_ext++;
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		sbi->read_hit_ext++;
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}
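
/*
 * Fold a freshly written block into the cached extent: initialize it,
 * merge at the front or the back, or split it around @fofs, whichever
 * case applies.
 */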
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
			fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
}
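
/*
 * Find the data page in the page cache, or read it from the block address
 * stored in the node page.  @sync selects a synchronous read (READ_SYNC)
 * versus readahead (READA).
 */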
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * Because the callers, functions in dir.c and GC, should be able to know
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	BUG_ON(dn.data_blkaddr == NEW_ADDR);
	BUG_ON(dn.data_blkaddr == NULL_ADDR);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
struct page *get_new_data_page(struct inode *inode, pgoff_t index,
						bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	f2fs_put_dnode(&dn);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		mark_inode_dirty_sync(inode);
	}
	return page;
}
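
/*
 * Read completion handler: walk the bio_vec array backwards, prefetching
 * the next page's flags, and mark each page uptodate (or failed) before
 * unlocking it.
 */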
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	kfree(bio->bi_private);
	bio_put(bio);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		kfree(bio->bi_private);
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

/*
 * This function should be used by the data read flow only where it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset(iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
					dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}
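
/*
 * Write one dirty data page: when SSR allocation prefers it, rewrite the
 * block in place; otherwise allocate a new block address and record it in
 * the extent cache.
 */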
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
				need_inplace_update(inode)) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
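
/*
 * writepage: pages entirely beyond i_size are skipped, the tail of a last
 * partial page is zeroed out, and the page is redirtied while roll-forward
 * recovery (por_doing) is in progress or on a transient error.
 */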
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		int ilock = mutex_lock_op(sbi);
		err = do_write_data_page(page);
		mutex_unlock_op(sbi, ilock);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

#define MAX_DESIRED_PAGES_WP	4096
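
/* Per-page callback for write_cache_pages(); records errors on the mapping */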
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
							void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode))
		mutex_lock(&sbi->writepages);
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (!S_ISDIR(inode->i_mode))
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;
	int ilock;

	/* for nobh_write_end */
	*fsdata = NULL;

	f2fs_balance_fs(sbi);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	ilock = mutex_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	mutex_unlock_op(sbi, ilock);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	mutex_unlock_op(sbi, ilock);
	f2fs_put_page(page, 1);
	return err;
}

static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
							get_data_block_ro);
}
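
/* invalidatepage: dropping a dirty dentry page must drop its dirty accounting too */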
static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}
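
/* Data address_space ops; the write path is nobh (see nobh_write_end and *fsdata) */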
const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= nobh_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};