/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>
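
/*
 * Background GC thread: wakes up every wait_ms milliseconds and, if the
 * gc_mutex can be taken and the filesystem looks idle, runs one round of
 * garbage collection. The sleep interval adapts between min_sleep_time and
 * max_sleep_time, and falls back to no_gc_sleep_time when no victim could
 * be selected.
 */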
static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        long wait_ms;

        wait_ms = gc_th->min_sleep_time;

        do {
                if (try_to_freeze())
                        continue;
                else
                        wait_event_interruptible_timeout(*wq,
                                                kthread_should_stop(),
                                                msecs_to_jiffies(wait_ms));
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        continue;
                }

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. IO subsystem is idle by checking the # of writeback pages.
                 * 3. IO subsystem is idle by checking the # of requests in
                 *    bdev's request list.
                 *
                 * Note) We have to avoid triggering GCs frequently.
                 * Because it is possible that some segments can be
                 * invalidated soon after by user update or deletion.
                 * So, I'd like to wait some time to collect dirty segments.
                 */
                if (!mutex_trylock(&sbi->gc_mutex))
                        continue;

                if (!is_idle(sbi)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        continue;
                }

                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);

                stat_inc_bggc_count(sbi);

                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC)))
                        wait_ms = gc_th->no_gc_sleep_time;

                trace_f2fs_background_gc(sbi->sb, wait_ms,
                                prefree_segments(sbi), free_segments(sbi));

                /* balancing f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi);

        } while (!kthread_should_stop());
        return 0;
}
int start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_idle = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}
void stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}
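
/*
 * Pick the victim selection policy for this GC pass: background GC defaults
 * to cost-benefit (GC_CB) and foreground GC to greedy (GC_GREEDY), unless
 * the gc_idle tunable forces one of them (1 selects GC_CB, 2 selects
 * GC_GREEDY).
 */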
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
        int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

        if (gc_th && gc_th->gc_idle) {
                if (gc_th->gc_idle == 1)
                        gc_mode = GC_CB;
                else if (gc_th->gc_idle == 2)
                        gc_mode = GC_GREEDY;
        }
        return gc_mode;
}
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }

        if (p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        p->offset = sbi->last_victim[p->gc_mode];
}
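
/*
 * Upper bound of the victim cost, used as the initial min_cost: for greedy
 * mode it is the number of blocks in the selection unit (one segment for
 * SSR, one section otherwise), while cost-benefit costs are only bounded by
 * UINT_MAX.
 */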
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                                struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return sbi->blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return sbi->blocks_per_seg * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can select victim segments
         * selected by background GC before.
         * Those segments guarantee they have small valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;
                clear_bit(secno, dirty_i->victim_secmap);
                return secno * sbi->segs_per_sec;
        }
        return NULL_SEGNO;
}
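
/*
 * Cost-benefit cost of a section: u is the average utilization in percent
 * and age reflects how long ago the section was last modified (0 for the
 * newest, up to 100 for the oldest). The returned value is
 * UINT_MAX - (100 * (100 - u) * age) / (100 + u), so cold sections with few
 * valid blocks get the smallest cost. For example, with u == 20 and
 * age == 100 the subtracted benefit is 100 * 80 * 100 / 120 = 6666, while a
 * hot or nearly full section (age == 0 or u == 100) gets no benefit at all.
 */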
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SECNO(sbi, segno);
        unsigned int start = secno * sbi->segs_per_sec;
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

        mtime = div_u64(mtime, sbi->segs_per_sec);
        vblocks = div_u64(vblocks, sbi->segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* Handle if the system time has changed by the user */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
        else
                return get_cb_cost(sbi, segno);
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                unsigned int *result, int gc_type, int type, char alloc_mode)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, max_cost;
        unsigned int last_segment = MAIN_SEGS(sbi);
        unsigned int nsearched = 0;

        mutex_lock(&dirty_i->seglist_lock);

        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);

        p.min_segno = NULL_SEGNO;
        p.min_cost = max_cost = get_max_cost(sbi, &p);

        if (p.max_search == 0)
                goto out;

        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

        while (1) {
                unsigned long cost;
                unsigned int segno;

                segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
                if (segno >= last_segment) {
                        if (sbi->last_victim[p.gc_mode]) {
                                last_segment = sbi->last_victim[p.gc_mode];
                                sbi->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                if (p.ofs_unit > 1)
                        p.offset -= segno % p.ofs_unit;

                secno = GET_SECNO(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        continue;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        continue;

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                } else if (unlikely(cost == max_cost)) {
                        continue;
                }

                if (nsearched++ >= p.max_search) {
                        sbi->last_victim[p.gc_mode] = segno;
                        break;
                }
        }
        if (p.min_segno != NULL_SEGNO) {
got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SECNO(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
out:
        mutex_unlock(&dirty_i->seglist_lock);

        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
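
/*
 * The default policy above scans the dirty segmap circularly: it resumes
 * from sbi->last_victim[gc_mode], wraps to the start of the main area when
 * the end is reached, and records where it stopped once max_search
 * candidates have been examined, so successive calls spread the search cost.
 */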
static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}
static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}
static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;

        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(inode_entry_slab, ie);
        }
}
static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}
/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
static int gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        bool initial = true;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
                struct node_info ni;

                /* stop BG_GC if there is not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return 0;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (initial) {
                        ra_node_page(sbi, nid);
                        continue;
                }
                node_page = get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* block may become invalid during get_node_page */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                get_node_info(sbi, nid, &ni);
                if (ni.blk_addr != start_addr + off) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                /* set page dirty and write it */
                if (gc_type == FG_GC) {
                        f2fs_wait_on_page_writeback(node_page, NODE);
                        set_page_dirty(node_page);
                } else {
                        if (!PageWriteback(node_page))
                                set_page_dirty(node_page);
                }
                f2fs_put_page(node_page, 1);
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (initial) {
                initial = false;
                goto next_step;
        }

        if (gc_type == FG_GC) {
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_ALL,
                        .nr_to_write = LONG_MAX,
                        .for_reclaim = 0,
                };
                sync_node_pages(sbi, 0, &wbc);

                /* return 1 only if FG_GC successfully reclaimed one */
                if (get_valid_blocks(sbi, segno, 1) == 0)
                        return 1;
        }
        return 0;
}
/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * mistake.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}
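
/*
 * Example: node_ofs == 1 is the first direct node block, so bidx is 0 and
 * its first data block index is ADDRS_PER_INODE(fi), right after the
 * addresses embedded in the inode. node_ofs == 4 is the first direct node
 * under the first indirect node (node_ofs == 3), so dec is 0, bidx is 2,
 * and its data starts at 2 * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi).
 */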
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return false;

        get_node_info(sbi, nid, dni);

        if (sum->version != dni->version) {
                f2fs_put_page(node_page, 1);
                return false;
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = datablock_addr(node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr)
                return false;
        return true;
}
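
/*
 * Move one data block of an encrypted regular file without decrypting it:
 * the ciphertext is read into a page of the meta inode's address space at
 * the old block address, a new block address is allocated in the cold data
 * log, and the page is written back there before the dnode and extent cache
 * are updated.
 */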
static void move_encrypted_block(struct inode *inode, block_t bidx)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
                .rw = READ_SYNC,
                .encrypted_page = NULL,
        };
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        struct page *page;
        int err;

        /* do not read out */
        page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
        if (!page)
                return;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
        if (err)
                goto out;

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                ClearPageUptodate(page);
                goto put_out;
        }

        /*
         * don't cache encrypted data into meta inode until previous dirty
         * data were writebacked to avoid racing between GC and flush.
         */
        f2fs_wait_on_page_writeback(page, DATA);

        get_node_info(fio.sbi, dn.nid, &ni);
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

        /* read page */
        fio.blk_addr = dn.data_blkaddr;

        fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi),
                                        fio.blk_addr,
                                        FGP_LOCK | FGP_CREAT,
                                        GFP_NOFS);
        if (!fio.encrypted_page)
                goto put_out;

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_page_out;

        /* write page */
        lock_page(fio.encrypted_page);

        if (unlikely(!PageUptodate(fio.encrypted_page)))
                goto put_page_out;
        if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
                goto put_page_out;

        set_page_dirty(fio.encrypted_page);
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);

        set_page_writeback(fio.encrypted_page);

        /* allocate block address */
        f2fs_wait_on_page_writeback(dn.node_page, NODE);
        allocate_data_block(fio.sbi, NULL, fio.blk_addr,
                                        &fio.blk_addr, &sum, CURSEG_COLD_DATA);
        fio.rw = WRITE_SYNC;
        f2fs_submit_page_mbio(&fio);

        dn.data_blkaddr = fio.blk_addr;
        set_data_blkaddr(&dn);
        f2fs_update_extent_cache(&dn);
        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
put_out:
        f2fs_put_dnode(&dn);
out:
        f2fs_put_page(page, 1);
}
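
/*
 * Move one plain data block: for background GC the page is only marked
 * dirty (and skipped if writeback is already in flight) so the regular
 * writeback path relocates it, while foreground GC writes it out
 * synchronously through do_write_data_page().
 */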
static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
{
        struct page *page;

        page = get_lock_data_page(inode, bidx, true);
        if (IS_ERR(page))
                return;

        if (gc_type == BG_GC) {
                if (PageWriteback(page))
                        goto out;
                set_page_dirty(page);
                set_cold_data(page);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .type = DATA,
                        .rw = WRITE_SYNC,
                        .page = page,
                        .encrypted_page = NULL,
                };
                set_page_dirty(page);
                f2fs_wait_on_page_writeback(page, DATA);
                if (clear_page_dirty_for_io(page))
                        inode_dec_dirty_pages(inode);
                set_cold_data(page);
                do_write_data_page(&fio);
                clear_cold_data(page);
        }
out:
        f2fs_put_page(page, 1);
}
/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
static int gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;

                /* stop BG_GC if there is not enough free sections. */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
                        return 0;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_node_page(sbi, le32_to_cpu(entry->nid));
                        continue;
                }

                /* Get an inode by ino with checking validity */
                if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
                        continue;

                if (phase == 1) {
                        ra_node_page(sbi, dni.ino);
                        continue;
                }

                ofs_in_node = le16_to_cpu(entry->ofs_in_node);

                if (phase == 2) {
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;

                        /* if encrypted inode, let's go phase 3 */
                        if (f2fs_encrypted_inode(inode) &&
                                                S_ISREG(inode->i_mode)) {
                                add_gc_inode(gc_list, inode);
                                continue;
                        }

                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
                        data_page = get_read_data_page(inode,
                                        start_bidx + ofs_in_node, READA, true);
                        if (IS_ERR(data_page)) {
                                iput(inode);
                                continue;
                        }

                        f2fs_put_page(data_page, 0);
                        add_gc_inode(gc_list, inode);
                        continue;
                }

                /* phase 3 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
                        start_bidx = start_bidx_of_node(nofs, F2FS_I(inode))
                                                                + ofs_in_node;
                        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                                move_encrypted_block(inode, start_bidx);
                        else
                                move_data_page(inode, start_bidx, gc_type);
                        stat_inc_data_blk_count(sbi, 1, gc_type);
                }
        }

        if (++phase < 4)
                goto next_step;

        if (gc_type == FG_GC) {
                f2fs_submit_merged_bio(sbi, DATA, WRITE);

                /* return 1 only if FG_GC successfully reclaimed one */
                if (get_valid_blocks(sbi, segno, 1) == 0)
                        return 1;
        }
        return 0;
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
                                                        int gc_type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                                        NO_CHECK_TYPE, LFS);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}
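
/*
 * Collect one victim segment: read its summary block, then dispatch to
 * gc_node_segment() or gc_data_segment() depending on the summary footer
 * type. Returns nonzero only when foreground GC actually emptied the
 * segment.
 */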
static int do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
                        struct gc_inode_list *gc_list, int gc_type)
{
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;
        int nfree = 0;

        /* read segment summary of victim */
        sum_page = get_sum_page(sbi, segno);

        blk_start_plug(&plug);

        sum = page_address(sum_page);

        /*
         * this is to avoid deadlock:
         * - lock_page(sum_page)         - f2fs_replace_block
         *  - check_valid_map()            - mutex_lock(sentry_lock)
         * - mutex_lock(sentry_lock)     - change_curseg()
         *                                  - lock_page(sum_page)
         */
        unlock_page(sum_page);

        switch (GET_SUM_TYPE((&sum->footer))) {
        case SUM_TYPE_NODE:
                nfree = gc_node_segment(sbi, sum->entries, segno, gc_type);
                break;
        case SUM_TYPE_DATA:
                nfree = gc_data_segment(sbi, sum->entries, gc_list,
                                                        segno, gc_type);
                break;
        }
        blk_finish_plug(&plug);

        stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)), gc_type);
        stat_inc_call_count(sbi->stat_info);

        f2fs_put_page(sum_page, 0);
        return nfree;
}
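
/*
 * Entry point for both background and foreground GC. The caller holds
 * sbi->gc_mutex, which is released here before returning. When @sync is
 * true GC runs in foreground mode and the return value is 0 if a section
 * was freed, -EAGAIN otherwise; background GC loops until enough free
 * sections are available.
 */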
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync)
{
        unsigned int segno, i;
        int gc_type = sync ? FG_GC : BG_GC;
        int sec_freed = 0;
        int ret = -EINVAL;
        struct cp_control cpc;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(GFP_NOFS),
        };

        cpc.reason = __get_cp_reason(sbi);
gc_more:
        segno = NULL_SEGNO;

        if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
                goto stop;
        if (unlikely(f2fs_cp_error(sbi))) {
                ret = -EIO;
                goto stop;
        }

        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed)) {
                gc_type = FG_GC;
                if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
                        write_checkpoint(sbi, &cpc);
        }

        if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;

        /* readahead multi ssa blocks those have contiguous address */
        if (sbi->segs_per_sec > 1)
                ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno), sbi->segs_per_sec,
                                                        META_SSA, true);

        for (i = 0; i < sbi->segs_per_sec; i++) {
                /*
                 * for FG_GC case, halt gcing left segments once failed one
                 * of segments in selected section to avoid long latency.
                 */
                if (!do_garbage_collect(sbi, segno + i, &gc_list, gc_type) &&
                                gc_type == FG_GC)
                        break;
        }

        if (i == sbi->segs_per_sec && gc_type == FG_GC)
                sec_freed++;

        if (gc_type == FG_GC)
                sbi->cur_victim_sec = NULL_SEGNO;

        if (!sync) {
                if (has_not_enough_free_secs(sbi, sec_freed))
                        goto gc_more;

                if (gc_type == FG_GC)
                        write_checkpoint(sbi, &cpc);
        }
stop:
        mutex_unlock(&sbi->gc_mutex);

        put_gc_inode(&gc_list);

        if (sync)
                ret = sec_freed ? 0 : -EAGAIN;
        return ret;
}
void build_gc_manager(struct f2fs_sb_info *sbi)
{
        DIRTY_I(sbi)->v_ops = &default_v_ops;
}