/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
#include <linux/cleancache.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *extent_tree_slab;
static struct kmem_cache *extent_node_slab;

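/*
 * Read completion: mark each segment's page up-to-date on success or
 * clear it on error, then unlock the page so waiters sleeping on the
 * page lock can proceed, and drop the bio.
 */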
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}

/*
 * I/O completion handler for multipage BIOs.
 * copied from fs/mpage.c
 */
static void mpage_end_io(struct bio *bio, int err)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

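/*
 * Submit the bio currently being merged in @io, tracing it as a read or
 * a write, and clear io->bio so that a new one can be started.
 */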
static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;

	if (!io->bio)
		return;

	if (is_read_io(fio->rw))
		trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
	else
		trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);

	submit_bio(fio->rw, io->bio);
	io->bio = NULL;
}

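/*
 * Flush the merged bio of the given page type. In the checkpoint path,
 * META is upgraded to META_FLUSH and issued with FLUSH_FUA (or a plain
 * FLUSH when the nobarrier mount option is set).
 */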
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located in the block address.
 * Return unlocked page.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, fio);
	f2fs_trace_ios(page, fio, 0);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, fio->blk_addr, 1, is_read_io(fio->rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(fio->rw, bio);
	return 0;
}

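/*
 * Merge the page into the per-type bio as long as it is physically
 * contiguous with the last merged block and shares the same rw flags;
 * otherwise the pending bio is submitted first and a new one is opened.
 */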
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
					struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, fio->blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = fio->blk_addr;
	f2fs_trace_ios(page, fio, 0);

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
void set_data_blkaddr(struct dnode_of_data *dn)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
	set_page_dirty(node_page);
}

int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	dn->data_blkaddr = NEW_ADDR;
	set_data_blkaddr(dn);
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}

int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}

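/*
 * Legacy extent cache: each inode caches a single contiguous extent in
 * fi->ext. Check whether @pgofs falls inside it, under ext_lock.
 */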
static bool lookup_extent_info(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext_lock);
		return false;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		*ei = fi->ext;
		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext_lock);
		return true;
	}
	read_unlock(&fi->ext_lock);
	return false;
}

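/*
 * Update the single cached extent with the new mapping (fofs, blkaddr):
 * initialize, front/back merge, or split it, and report whether the inode
 * page needs to be synced. Extents shorter than F2FS_MIN_EXTENT_LEN are
 * dropped and FI_NO_EXTENT is set to stop further caching.
 */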
static bool update_extent_info(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	write_lock(&fi->ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk;
	end_blkaddr = fi->ext.blk + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blkaddr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk = blkaddr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blkaddr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blkaddr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext_lock);
	return need_update;
}

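/* rb-tree extent cache: allocate an extent node and link it at @parent/@p. */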
static struct extent_node *__attach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct rb_node *parent, struct rb_node **p)
{
	struct extent_node *en;

	en = kmem_cache_alloc(extent_node_slab, GFP_ATOMIC);
	if (!en)
		return NULL;

	en->ei = *ei;
	INIT_LIST_HEAD(&en->list);

	rb_link_node(&en->rb_node, parent, p);
	rb_insert_color(&en->rb_node, &et->root);
	et->count++;
	atomic_inc(&sbi->total_ext_node);
	return en;
}

static void __detach_extent_node(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	rb_erase(&en->rb_node, &et->root);
	et->count--;
	atomic_dec(&sbi->total_ext_node);

	if (et->cached_en == en)
		et->cached_en = NULL;
}

static struct extent_tree *__find_extent_tree(struct f2fs_sb_info *sbi,
							nid_t ino)
{
	struct extent_tree *et;

	down_read(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		up_read(&sbi->extent_tree_lock);
		return NULL;
	}
	atomic_inc(&et->refcount);
	up_read(&sbi->extent_tree_lock);

	return et;
}

static struct extent_tree *__grab_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	nid_t ino = inode->i_ino;

	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, ino);
	if (!et) {
		et = f2fs_kmem_cache_alloc(extent_tree_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&sbi->extent_tree_root, ino, et);
		memset(et, 0, sizeof(struct extent_tree));
		et->ino = ino;
		et->root = RB_ROOT;
		et->cached_en = NULL;
		rwlock_init(&et->lock);
		atomic_set(&et->refcount, 0);
		et->count = 0;
		sbi->total_ext_tree++;
	}
	atomic_inc(&et->refcount);
	up_write(&sbi->extent_tree_lock);

	return et;
}

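/*
 * Find the extent node covering @fofs, trying the cached_en shortcut
 * before walking the rb-tree.
 */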
static struct extent_node *__lookup_extent_tree(struct extent_tree *et,
							unsigned int fofs)
{
	struct rb_node *node = et->root.rb_node;
	struct extent_node *en;

	if (et->cached_en) {
		struct extent_info *cei = &et->cached_en->ei;

		if (cei->fofs <= fofs && cei->fofs + cei->len > fofs)
			return et->cached_en;
	}

	while (node) {
		en = rb_entry(node, struct extent_node, rb_node);

		if (fofs < en->ei.fofs) {
			node = node->rb_left;
		} else if (fofs >= en->ei.fofs + en->ei.len) {
			node = node->rb_right;
		} else {
			et->cached_en = en;
			return en;
		}
	}
	return NULL;
}

static struct extent_node *__try_back_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *prev;
	struct rb_node *node;

	node = rb_prev(&en->rb_node);
	if (!node)
		return NULL;

	prev = rb_entry(node, struct extent_node, rb_node);
	if (__is_back_mergeable(&en->ei, &prev->ei)) {
		en->ei.fofs = prev->ei.fofs;
		en->ei.blk = prev->ei.blk;
		en->ei.len += prev->ei.len;
		__detach_extent_node(sbi, et, prev);
		return prev;
	}
	return NULL;
}

static struct extent_node *__try_front_merge(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_node *en)
{
	struct extent_node *next;
	struct rb_node *node;

	node = rb_next(&en->rb_node);
	if (!node)
		return NULL;

	next = rb_entry(node, struct extent_node, rb_node);
	if (__is_front_mergeable(&en->ei, &next->ei)) {
		en->ei.len += next->ei.len;
		__detach_extent_node(sbi, et, next);
		return next;
	}
	return NULL;
}

static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
				struct extent_tree *et, struct extent_info *ei,
				struct extent_node **den)
{
	struct rb_node **p = &et->root.rb_node;
	struct rb_node *parent = NULL;
	struct extent_node *en;

	while (*p) {
		parent = *p;
		en = rb_entry(parent, struct extent_node, rb_node);

		if (ei->fofs < en->ei.fofs) {
			if (__is_front_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.fofs = ei->fofs;
				en->ei.blk = ei->blk;
				en->ei.len += ei->len;
				*den = __try_back_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_left;
		} else if (ei->fofs >= en->ei.fofs + en->ei.len) {
			if (__is_back_mergeable(ei, &en->ei)) {
				f2fs_bug_on(sbi, !den);
				en->ei.len += ei->len;
				*den = __try_front_merge(sbi, et, en);
				return en;
			}
			p = &(*p)->rb_right;
		} else {
			f2fs_bug_on(sbi, 1);
		}
	}

	return __attach_extent_node(sbi, et, ei, parent, p);
}

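/*
 * Free the nodes of an extent tree; with @free_all, nodes still linked on
 * the global shrinker list are freed as well. Returns how many nodes were
 * freed.
 */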
static unsigned int __free_extent_tree(struct f2fs_sb_info *sbi,
					struct extent_tree *et, bool free_all)
{
	struct rb_node *node, *next;
	struct extent_node *en;
	unsigned int count = et->count;

	node = rb_first(&et->root);
	while (node) {
		next = rb_next(node);
		en = rb_entry(node, struct extent_node, rb_node);

		if (free_all) {
			spin_lock(&sbi->extent_lock);
			if (!list_empty(&en->list))
				list_del_init(&en->list);
			spin_unlock(&sbi->extent_lock);
		}

		if (free_all || list_empty(&en->list)) {
			__detach_extent_node(sbi, et, en);
			kmem_cache_free(extent_node_slab, en);
		}
		node = next;
	}

	return count - et->count;
}

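/*
 * Seed the rb-tree extent cache from the on-disk i_ext of the inode, when
 * the recorded extent is long enough to be worth caching.
 */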
static void f2fs_init_extent_tree(struct inode *inode,
						struct f2fs_extent *i_ext)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;
	struct extent_info ei;

	if (le32_to_cpu(i_ext->len) < F2FS_MIN_EXTENT_LEN)
		return;

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);
	if (et->count)
		goto out;

	set_extent_info(&ei, le32_to_cpu(i_ext->fofs),
		le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len));

	en = __insert_extent_tree(sbi, et, &ei, NULL);
	if (en) {
		et->cached_en = en;

		spin_lock(&sbi->extent_lock);
		list_add_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
	}
out:
	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}

static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en;

	trace_f2fs_lookup_extent_tree_start(inode, pgofs);

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		return false;

	read_lock(&et->lock);
	en = __lookup_extent_tree(et, pgofs);
	if (en) {
		*ei = en->ei;
		spin_lock(&sbi->extent_lock);
		if (!list_empty(&en->list))
			list_move_tail(&en->list, &sbi->extent_list);
		spin_unlock(&sbi->extent_lock);
		stat_inc_read_hit(sbi->sb);
	}
	stat_inc_total_hit(sbi->sb);
	read_unlock(&et->lock);

	trace_f2fs_lookup_extent_tree_end(inode, pgofs, en);

	atomic_dec(&et->refcount);
	return en ? true : false;
}

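/*
 * Insert the mapping of @fofs -> @blkaddr into the extent tree: remove the
 * overlapping extent, re-insert its left/right remainders when they are
 * long enough, insert the new one-block extent, and refresh the global
 * LRU list used by the shrinker.
 */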
static void f2fs_update_extent_tree(struct inode *inode, pgoff_t fofs,
							block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	struct extent_node *en = NULL, *en1 = NULL, *en2 = NULL, *en3 = NULL;
	struct extent_node *den = NULL;
	struct extent_info ei, dei;
	unsigned int endofs;

	trace_f2fs_update_extent_tree(inode, fofs, blkaddr);

	et = __grab_extent_tree(inode);

	write_lock(&et->lock);

	/* 1. lookup and remove existing extent info in cache */
	en = __lookup_extent_tree(et, fofs);
	if (!en)
		goto update_extent;

	dei = en->ei;
	__detach_extent_node(sbi, et, en);

	/* 2. if extent can be split more, split and insert the left part */
	if (dei.len > 1) {
		/* insert left part of split extent into cache */
		if (fofs - dei.fofs >= F2FS_MIN_EXTENT_LEN) {
			set_extent_info(&ei, dei.fofs, dei.blk,
							fofs - dei.fofs);
			en1 = __insert_extent_tree(sbi, et, &ei, NULL);
		}

		/* insert right part of split extent into cache */
		endofs = dei.fofs + dei.len - 1;
		if (endofs - fofs >= F2FS_MIN_EXTENT_LEN) {
			/* the right part starts at fofs + 1, so its block
			 * address needs the extra + 1 as well */
			set_extent_info(&ei, fofs + 1,
				fofs - dei.fofs + dei.blk + 1, endofs - fofs);
			en2 = __insert_extent_tree(sbi, et, &ei, NULL);
		}
	}

update_extent:
	/* 3. update extent in extent cache */
	if (blkaddr) {
		set_extent_info(&ei, fofs, blkaddr, 1);
		en3 = __insert_extent_tree(sbi, et, &ei, &den);
	}

	/* 4. update in global extent list */
	spin_lock(&sbi->extent_lock);
	if (en && !list_empty(&en->list))
		list_del(&en->list);
	/*
	 * en1 and en2 are split from en; they become smaller and smaller
	 * fragments after splitting several times. So if the length is
	 * smaller than F2FS_MIN_EXTENT_LEN, we do not add them into the
	 * extent tree.
	 */
	if (en1)
		list_add_tail(&en1->list, &sbi->extent_list);
	if (en2)
		list_add_tail(&en2->list, &sbi->extent_list);
	if (en3) {
		if (list_empty(&en3->list))
			list_add_tail(&en3->list, &sbi->extent_list);
		else
			list_move_tail(&en3->list, &sbi->extent_list);
	}
	if (den && !list_empty(&den->list))
		list_del(&den->list);
	spin_unlock(&sbi->extent_lock);

	/* 5. release extent node */
	if (en)
		kmem_cache_free(extent_node_slab, en);
	if (den)
		kmem_cache_free(extent_node_slab, den);

	write_unlock(&et->lock);
	atomic_dec(&et->refcount);
}

void f2fs_preserve_extent_tree(struct inode *inode)
{
	struct extent_tree *et;
	struct extent_info *ext = &F2FS_I(inode)->ext;
	bool sync = false;

	if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return;

	et = __find_extent_tree(F2FS_I_SB(inode), inode->i_ino);
	if (!et) {
		if (!ext->len)
			return;
		ext->len = 0;
		update_inode_page(inode);
		return;
	}

	read_lock(&et->lock);
	if (et->count) {
		struct extent_node *en;

		if (et->cached_en) {
			en = et->cached_en;
		} else {
			struct rb_node *node = rb_first(&et->root);

			if (!node)
				node = rb_last(&et->root);
			en = rb_entry(node, struct extent_node, rb_node);
		}

		if (__is_extent_same(ext, &en->ei))
			goto out;

		*ext = en->ei;
		sync = true;
	} else if (ext->len) {
		ext->len = 0;
		sync = true;
	}
out:
	read_unlock(&et->lock);
	atomic_dec(&et->refcount);

	if (sync)
		update_inode_page(inode);
}

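/*
 * Shrinker entry point: unlink up to @nr_shrink nodes from the global LRU
 * list, free unlisted nodes tree by tree, then delete trees that ended up
 * empty and unreferenced.
 */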
void f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct extent_tree *treevec[EXT_TREE_VEC_SIZE];
	struct extent_node *en, *tmp;
	unsigned long ino = F2FS_ROOT_INO(sbi);
	struct radix_tree_iter iter;
	void **slot;
	unsigned int found;
	unsigned int node_cnt = 0, tree_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	if (available_free_memory(sbi, EXTENT_CACHE))
		return;

	spin_lock(&sbi->extent_lock);
	list_for_each_entry_safe(en, tmp, &sbi->extent_list, list) {
		if (!nr_shrink--)
			break;
		list_del_init(&en->list);
	}
	spin_unlock(&sbi->extent_lock);

	down_read(&sbi->extent_tree_lock);
	while ((found = radix_tree_gang_lookup(&sbi->extent_tree_root,
				(void **)treevec, ino, EXT_TREE_VEC_SIZE))) {
		unsigned i;

		ino = treevec[found - 1]->ino + 1;
		for (i = 0; i < found; i++) {
			struct extent_tree *et = treevec[i];

			atomic_inc(&et->refcount);
			write_lock(&et->lock);
			node_cnt += __free_extent_tree(sbi, et, false);
			write_unlock(&et->lock);
			atomic_dec(&et->refcount);
		}
	}
	up_read(&sbi->extent_tree_lock);

	down_write(&sbi->extent_tree_lock);
	radix_tree_for_each_slot(slot, &sbi->extent_tree_root, &iter,
							F2FS_ROOT_INO(sbi)) {
		struct extent_tree *et = (struct extent_tree *)*slot;

		if (!atomic_read(&et->refcount) && !et->count) {
			radix_tree_delete(&sbi->extent_tree_root, et->ino);
			kmem_cache_free(extent_tree_slab, et);
			sbi->total_ext_tree--;
			tree_cnt++;
		}
	}
	up_write(&sbi->extent_tree_lock);

	trace_f2fs_shrink_extent_tree(sbi, node_cnt, tree_cnt);
}

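/* Called on inode eviction: free all extent nodes and the tree itself. */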
void f2fs_destroy_extent_tree(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct extent_tree *et;
	unsigned int node_cnt = 0;

	if (!test_opt(sbi, EXTENT_CACHE))
		return;

	et = __find_extent_tree(sbi, inode->i_ino);
	if (!et)
		goto out;

	/* free all extent info belonging to this extent tree */
	write_lock(&et->lock);
	node_cnt = __free_extent_tree(sbi, et, true);
	write_unlock(&et->lock);

	atomic_dec(&et->refcount);

	/* try to find and delete extent tree entry in radix tree */
	down_write(&sbi->extent_tree_lock);
	et = radix_tree_lookup(&sbi->extent_tree_root, inode->i_ino);
	if (!et) {
		up_write(&sbi->extent_tree_lock);
		goto out;
	}
	f2fs_bug_on(sbi, atomic_read(&et->refcount) || et->count);
	radix_tree_delete(&sbi->extent_tree_root, inode->i_ino);
	kmem_cache_free(extent_tree_slab, et);
	sbi->total_ext_tree--;
	up_write(&sbi->extent_tree_lock);
out:
	trace_f2fs_destroy_extent_tree(inode, node_cnt);
}

void f2fs_init_extent_cache(struct inode *inode, struct f2fs_extent *i_ext)
{
	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		f2fs_init_extent_tree(inode, i_ext);

	write_lock(&F2FS_I(inode)->ext_lock);
	get_extent_info(&F2FS_I(inode)->ext, *i_ext);
	write_unlock(&F2FS_I(inode)->ext_lock);
}

static bool f2fs_lookup_extent_cache(struct inode *inode, pgoff_t pgofs,
							struct extent_info *ei)
{
	if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
		return false;

	if (test_opt(F2FS_I_SB(inode), EXTENT_CACHE))
		return f2fs_lookup_extent_tree(inode, pgofs, ei);

	return lookup_extent_info(inode, pgofs, ei);
}

void f2fs_update_extent_cache(struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs;

	f2fs_bug_on(F2FS_I_SB(dn->inode), dn->data_blkaddr == NEW_ADDR);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	if (test_opt(F2FS_I_SB(dn->inode), EXTENT_CACHE))
		return f2fs_update_extent_tree(dn->inode, fofs,
							dn->data_blkaddr);

	if (update_extent_info(dn->inode, fofs, dn->data_blkaddr))
		sync_inode_page(dn);
}

struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = sync ? READ_SYNC : READA,
	};

	/*
	 * If sync is false, it needs to check its block allocation.
	 * This is needed and triggered by two flows:
	 *   gc and truncate_partial_data_page.
	 */
	if (!sync)
		goto search;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);
search:
	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* By fallocate(), there is no cached page, but with NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);
got_it:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error: the callers (functions
 * in dir.c and GC) should be able to know whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	struct extent_info ei;
	int err;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = READ_SYNC,
	};
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (f2fs_lookup_extent_cache(inode, index, &ei)) {
		dn.data_blkaddr = ei.blk + index - ei.fofs;
		goto got_it;
	}

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}
got_it:
	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain NEW_ADDR.
	 * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	fio.blk_addr = dn.data_blkaddr;
	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 * Note that, ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, &fio);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}

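/*
 * Allocate one data block for @dn and advance i_size if needed. When the
 * dnode is the inode page itself and ofs_in_node is 0, the block goes to
 * the CURSEG_DIRECT_IO log.
 */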
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	struct node_info ni;
	int seg = CURSEG_WARM_DATA;
	pgoff_t fofs;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;

	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	if (dn->data_blkaddr == NEW_ADDR)
		goto alloc;

	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

alloc:
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page)
		seg = CURSEG_DIRECT_IO;

	allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
								&sum, seg);

	/* direct IO doesn't use extent cache to maximize performance */
	set_data_blkaddr(dn);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	return 0;
}

static void __allocate_data_blocks(struct inode *inode, loff_t offset,
							size_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	u64 start = F2FS_BYTES_TO_BLK(offset);
	u64 len = F2FS_BYTES_TO_BLK(count);
	bool allocated;
	u64 end_offset;

	while (len) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);

		/* When reading holes, we need its node page */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		if (get_dnode_of_data(&dn, start, ALLOC_NODE))
			goto out;

		allocated = false;
		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		while (dn.ofs_in_node < end_offset && len) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
			if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) {
				if (__allocate_data_block(&dn))
					goto sync_out;
				allocated = true;
			}
			len--;
			start++;
			dn.ofs_in_node++;
		}

		if (allocated)
			sync_inode_page(&dn);

		f2fs_put_dnode(&dn);
		f2fs_unlock_op(sbi);
	}
	return;

sync_out:
	if (allocated)
		sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);
}

/*
 * f2fs_map_blocks() now supports readahead/bmap/rw direct_IO with the
 * f2fs_map_blocks structure.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
			int create, bool fiemap)
{
	unsigned int maxblocks = map->m_len;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	struct extent_info ei;
	bool allocated = false;

	map->m_len = 0;
	map->m_flags = 0;

	/* it only supports block size == page size */
	pgofs = (pgoff_t)map->m_lblk;

	if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) {
		map->m_pblk = ei.blk + pgofs - ei.fofs;
		map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs);
		map->m_flags = F2FS_MAP_MAPPED;
		goto out;
	}

	if (create)
		f2fs_lock_op(F2FS_I_SB(inode));

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map->m_flags = F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map->m_flags = F2FS_MAP_NEW | F2FS_MAP_MAPPED;
		map->m_pblk = dn.data_blkaddr;
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	map->m_len = 1;
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > map->m_len) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			map->m_flags |= F2FS_MAP_NEW;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			map->m_len++;
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, bool fiemap)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, fiemap);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0, false))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO. Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, bio_get_nr_vecs(bdev)));
			if (!bio)
				goto set_error_page;
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = mpage_end_io;
			bio->bi_private = NULL;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		write_data_page(page, &dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

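/*
 * Write back one dirty data page: pages wholly beyond i_size are skipped,
 * the tail page is zeroed past EOF, dentry pages are written under
 * checkpoint control, and regular data goes through inline conversion or
 * do_write_data_page() under f2fs_lock_op().
 */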
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to let the kworker jobs proceed */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page, *ipage;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);

	/*
	 * We should check this at this moment to avoid deadlock on inode page
	 * and #0 page. The locking rule for inline_data conversion should be:
	 * lock_page(page #0) -> lock_page(inode_page)
	 */
	if (index != 0) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			goto fail;
	}
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	*pagep = page;

	f2fs_lock_op(sbi);

	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_fail;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
			goto put_next;
		}
		err = f2fs_convert_inline_page(&dn, page);
		if (err)
			goto put_fail;
	}
	err = f2fs_reserve_block(&dn, index);
	if (err)
		goto put_fail;
put_next:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	f2fs_wait_on_page_writeback(page, DATA);

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		struct f2fs_io_info fio = {
			.type = DATA,
			.rw = READ_SYNC,
			.blk_addr = dn.data_blkaddr,
		};
		err = f2fs_submit_page_bio(sbi, page, &fio);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

put_fail:
	f2fs_put_dnode(&dn);
unlock_fail:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}

static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

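/*
 * Direct I/O sanity check: reads always qualify, while writes require a
 * block-aligned offset and block-aligned iovecs; when the check fails,
 * the caller returns 0 so the VFS falls back to buffered I/O.
 */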
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
							loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (iov_iter_rw(iter) == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}

static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
							loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	if (check_direct_IO(inode, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));

	if (iov_iter_rw(iter) == WRITE)
		__allocate_data_blocks(inode, offset, count);

	err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block);
	if (err < 0 && iov_iter_rw(iter) == WRITE)
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);

	return err;
}

void f2fs_invalidate_page(struct page *page, unsigned int offset,
							unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino >= F2FS_ROOT_INO(sbi) &&
		(offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE))
		return;

	if (PageDirty(page)) {
		if (inode->i_ino == F2FS_META_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_META);
		else if (inode->i_ino == F2FS_NODE_INO(sbi))
			dec_page_count(sbi, F2FS_DIRTY_NODES);
		else
			inode_dec_dirty_pages(inode);
	}
	ClearPagePrivate(page);
}

int f2fs_release_page(struct page *page, gfp_t wait)
{
	/* If this is a dirty page, keep PagePrivate */
	if (PageDirty(page))
		return 0;

	ClearPagePrivate(page);
	return 1;
}

static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	if (f2fs_is_atomic_file(inode)) {
		register_inmem_page(inode, page);
		return 1;
	}

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}
	return generic_block_bmap(mapping, block, get_data_block);
}

void init_extent_cache_info(struct f2fs_sb_info *sbi)
{
	INIT_RADIX_TREE(&sbi->extent_tree_root, GFP_NOIO);
	init_rwsem(&sbi->extent_tree_lock);
	INIT_LIST_HEAD(&sbi->extent_list);
	spin_lock_init(&sbi->extent_lock);
	sbi->total_ext_tree = 0;
	atomic_set(&sbi->total_ext_node, 0);
}

int __init create_extent_cache(void)
{
	extent_tree_slab = f2fs_kmem_cache_create("f2fs_extent_tree",
			sizeof(struct extent_tree));
	if (!extent_tree_slab)
		return -ENOMEM;
	extent_node_slab = f2fs_kmem_cache_create("f2fs_extent_node",
			sizeof(struct extent_node));
	if (!extent_node_slab) {
		kmem_cache_destroy(extent_tree_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_extent_cache(void)
{
	kmem_cache_destroy(extent_node_slab);
	kmem_cache_destroy(extent_tree_slab);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};