/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/blkdev.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *winode_slab;
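/*
 * Background GC thread: it wakes up periodically, backs off while the
 * filesystem is frozen or busy, shortens its sleep when invalid blocks pile
 * up and lengthens it otherwise (increase_sleep_time()/decrease_sleep_time()
 * in gc.h step wait_ms between min_sleep_time and max_sleep_time), and parks
 * for no_gc_sleep_time when a pass finds no victim.
 */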
static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	long wait_ms;

	wait_ms = gc_th->min_sleep_time;

	do {
		if (try_to_freeze())
			continue;
		else
			wait_event_interruptible_timeout(*wq,
					kthread_should_stop(),
					msecs_to_jiffies(wait_ms));
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			continue;
		}

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments can be invalidated soon after by
		 * user update or deletion, so wait some time to let dirty
		 * segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			continue;

		if (!is_idle(sbi)) {
			wait_ms = increase_sleep_time(gc_th, wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			continue;
		}

		if (has_enough_invalid_blocks(sbi))
			wait_ms = decrease_sleep_time(gc_th, wait_ms);
		else
			wait_ms = increase_sleep_time(gc_th, wait_ms);

		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi))
			wait_ms = gc_th->no_gc_sleep_time;

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);

	} while (!kthread_should_stop());
	return 0;
}
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	if (!test_opt(sbi, BG_GC))
		goto out;
	gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}
void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}
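/*
 * Pick the victim-selection policy for this pass: by default background GC
 * uses cost-benefit and everything else uses greedy; a non-zero gc_idle
 * setting on the GC thread overrides this to cost-benefit (1) or greedy (2).
 */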
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	if (p->max_search > MAX_VICTIM_SEARCH)
		p->max_search = MAX_VICTIM_SEARCH;

	p->offset = sbi->last_victim[p->gc_mode];
}
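/*
 * get_max_cost() returns the worst possible cost under the current policy;
 * get_victim_by_default() seeds p.min_cost with it so that any scanned
 * segment with a strictly smaller cost becomes the current victim.
 */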
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return 1 << sbi->log_blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return (1 << sbi->log_blocks_per_seg) * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int hint = 0;
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select victim segments
	 * selected by background GC before.
	 * Those segments guarantee they have small valid blocks.
	 */
next:
	secno = find_next_bit(dirty_i->victim_secmap, TOTAL_SECS(sbi), hint++);
	if (secno < TOTAL_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			goto next;
		clear_bit(secno, dirty_i->victim_secmap);
		return secno * sbi->segs_per_sec;
	}
	return NULL_SEGNO;
}
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SECNO(sbi, segno);
	unsigned int start = secno * sbi->segs_per_sec;
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle if the system time is changed by user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);
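	/*
	 * Example: a section that is 20% valid (u = 20) with age 50 costs
	 * UINT_MAX - (100 * 80 * 50) / 120 = UINT_MAX - 3333, whereas a
	 * fully invalid, maximally aged section (u = 0, age = 100) costs
	 * UINT_MAX - 10000, the smallest possible value; the victim search
	 * picks the candidate with the minimum cost.
	 */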
	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
	else
		return get_cb_cost(sbi, segno);
}
/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and it does not remove it from dirty seglist.
 * When it is called from SSR segment selection, it finds a segment
 * which has minimum valid blocks and removes it from dirty seglist.
 */
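/*
 * The scan below visits at most p.max_search candidates per call;
 * sbi->last_victim[] remembers where the previous scan stopped so the next
 * call resumes from that offset instead of restarting from segment zero.
 */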
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, max_cost;
	int nsearched = 0;

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = max_cost = get_max_cost(sbi, &p);

	mutex_lock(&dirty_i->seglist_lock);

	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap,
						TOTAL_SEGS(sbi), p.offset);
		if (segno >= TOTAL_SEGS(sbi)) {
			if (sbi->last_victim[p.gc_mode]) {
				sbi->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1)
			p.offset -= segno % p.ofs_unit;

		secno = GET_SECNO(sbi, segno);

		if (sec_usage_check(sbi, secno))
			continue;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			continue;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		} else if (unlikely(cost == max_cost)) {
			continue;
		}

		if (nsearched++ >= p.max_search) {
			sbi->last_victim[p.gc_mode] = segno;
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SECNO(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};
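/*
 * Inodes touched while moving data blocks are cached on a per-pass list
 * (ilist) so each inode is brought in with f2fs_iget() only once; the
 * references are dropped in one go by put_gc_inode() at the end of the pass.
 */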
static struct inode *find_gc_inode(nid_t ino, struct list_head *ilist)
{
	struct inode_entry *ie;

	list_for_each_entry(ie, ilist, list)
		if (ie->inode->i_ino == ino)
			return ie->inode;
	return NULL;
}
static void add_gc_inode(struct inode *inode, struct list_head *ilist)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(inode->i_ino, ilist)) {
		iput(inode);
		return;
	}

	new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
	new_ie->inode = inode;
	list_add_tail(&new_ie->list, ilist);
}
static void put_gc_inode(struct list_head *ilist)
{
	struct inode_entry *ie, *next_ie;
	list_for_each_entry_safe(ie, next_ie, ilist, list) {
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(winode_slab, ie);
	}
}
static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	mutex_lock(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
/*
 * This function compares node address got in summary with that in NAT.
 * On validity, copy that node with cold status, otherwise (invalid node)
 * ignore that.
 */
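/*
 * The segment is walked twice: the first pass only issues readahead for the
 * victim's node pages, and the second pass dirties and writes them out.
 */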
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	bool initial = true;
	struct f2fs_summary *entry;
	int off;

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (initial) {
			ra_node_page(sbi, nid);
			continue;
		}
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* set page dirty and write it */
		if (gc_type == FG_GC) {
			f2fs_wait_on_page_writeback(node_page, NODE, true);
			set_page_dirty(node_page);
		} else {
			if (!PageWriteback(node_page))
				set_page_dirty(node_page);
		}
		f2fs_put_page(node_page, 1);
		stat_inc_node_blk_count(sbi, 1);
	}

	if (initial) {
		initial = false;
		goto next_step;
	}

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = LONG_MAX,
			.for_reclaim = 0,
		};
		sync_node_pages(sbi, 0, &wbc);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0)
			goto next_step;
	}
}
/*
 * Calculate start block index indicating the given node offset.
 * Be careful, caller should give this node offset only indicating direct node
 * blocks. If any node offsets, which point the other types of node blocks such
 * as indirect or double indirect node blocks, are given, it must be a caller's
 * bug.
 */
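/*
 * For example, with the usual f2fs inode layout (two direct node blocks,
 * then two indirect and one double indirect): node_ofs 1 and 2 are the
 * direct nodes and map to bidx 0 and 1, and node_ofs 4, the first direct
 * node hanging off the first indirect block, maps to bidx 2. The returned
 * block index is bidx * ADDRS_PER_BLOCK plus the data addresses stored in
 * the inode itself.
 */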
block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return 0;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_put_page(node_page, 1);
		return 0;
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return 0;
	return 1;
}
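/*
 * Background GC only re-dirties the data page and tags it cold so normal
 * writeback migrates it later; foreground GC waits for writeback and writes
 * the block out immediately so the victim section can be freed right away.
 */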
static void move_data_page(struct inode *inode, struct page *page, int gc_type)
{
	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

		f2fs_wait_on_page_writeback(page, DATA, true);

		if (clear_page_dirty_for_io(page) &&
			S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		set_cold_data(page);
		do_write_data_page(page);
		clear_cold_data(page);
	}
out:
	f2fs_put_page(page, 1);
}
/*
 * This function tries to get parent node of victim data block, and identifies
 * data block validity. If the block is valid, copy that with cold status and
 * modify parent node.
 * If the parent node is not valid or the data block address is different,
 * the victim data block is ignored.
 */
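/*
 * Like gc_node_segment(), the victim is walked in several phases: readahead
 * of the summary's node pages, readahead of the owning inodes, pinning each
 * inode together with its data page, and finally moving the data pages.
 */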
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct list_head *ilist, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;

		/* stop BG_GC if there is not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_node_page(sbi, le32_to_cpu(entry->nid));
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (check_dnode(sbi, entry, &dni, start_addr + off, &nofs) == 0)
			continue;

		if (phase == 1) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 2) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode))
				continue;

			start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));

			data_page = find_data_page(inode,
					start_bidx + ofs_in_node, false);
			if (IS_ERR(data_page))
				goto next_iput;

			f2fs_put_page(data_page, 0);
			add_gc_inode(inode, ilist);
		} else {
			inode = find_gc_inode(dni.ino, ilist);
			if (inode) {
				start_bidx = start_bidx_of_node(nofs,
								F2FS_I(inode));
				data_page = get_lock_data_page(inode,
						start_bidx + ofs_in_node);
				if (IS_ERR(data_page))
					continue;
				move_data_page(inode, data_page, gc_type);
				stat_inc_data_blk_count(sbi, 1);
			}
		}
		continue;
next_iput:
		iput(inode);
	}

	if (++phase < 4)
		goto next_step;

	if (gc_type == FG_GC) {
		f2fs_submit_bio(sbi, DATA, true);

		/*
		 * In the case of FG_GC, it'd be better to reclaim this victim
		 * completely.
		 */
		if (get_valid_blocks(sbi, segno, 1) != 0) {
			phase = 2;
			goto next_step;
		}
	}
}
static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
						int gc_type, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;
	mutex_lock(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type, type, LFS);
	mutex_unlock(&sit_i->sentry_lock);
	return ret;
}
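/*
 * Collect one victim segment: read its summary block and, depending on the
 * summary footer type, migrate either its node blocks or its data blocks.
 */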
static void do_garbage_collect(struct f2fs_sb_info *sbi, unsigned int segno,
				struct list_head *ilist, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;

	/* read segment summary of victim */
	sum_page = get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return;

	blk_start_plug(&plug);

	sum = page_address(sum_page);

	switch (GET_SUM_TYPE((&sum->footer))) {
	case SUM_TYPE_NODE:
		gc_node_segment(sbi, sum->entries, segno, gc_type);
		break;
	case SUM_TYPE_DATA:
		gc_data_segment(sbi, sum->entries, ilist, segno, gc_type);
		break;
	}
	blk_finish_plug(&plug);

	stat_inc_seg_count(sbi, GET_SUM_TYPE((&sum->footer)));
	stat_inc_call_count(sbi->stat_info);

	f2fs_put_page(sum_page, 1);
}
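/*
 * f2fs_gc() is entered with sbi->gc_mutex held and releases it before
 * returning. It starts as background GC and escalates to foreground GC
 * (bracketed by checkpoints) when free sections run short, looping until
 * enough sections have been reclaimed.
 */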
int f2fs_gc(struct f2fs_sb_info *sbi)
{
	struct list_head ilist;
	unsigned int segno, i;
	int gc_type = BG_GC;
	int nfree = 0;
	int ret = -1;

	INIT_LIST_HEAD(&ilist);
gc_more:
	if (!(sbi->sb->s_flags & MS_ACTIVE))
		goto stop;

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) {
		gc_type = FG_GC;
		write_checkpoint(sbi, false);
	}

	if (!__get_victim(sbi, &segno, gc_type, NO_CHECK_TYPE))
		goto stop;
	ret = 0;

	for (i = 0; i < sbi->segs_per_sec; i++)
		do_garbage_collect(sbi, segno + i, &ilist, gc_type);

	if (gc_type == FG_GC) {
		sbi->cur_victim_sec = NULL_SEGNO;
		nfree++;
		WARN_ON(get_valid_blocks(sbi, segno, sbi->segs_per_sec));
	}

	if (has_not_enough_free_secs(sbi, nfree))
		goto gc_more;

	if (gc_type == FG_GC)
		write_checkpoint(sbi, false);
stop:
	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&ilist);
	return ret;
}
void build_gc_manager(struct f2fs_sb_info *sbi)
{
	DIRTY_I(sbi)->v_ops = &default_v_ops;
}
int __init create_gc_caches(void)
{
	winode_slab = f2fs_kmem_cache_create("f2fs_gc_inodes",
			sizeof(struct inode_entry), NULL);
	if (!winode_slab)
		return -ENOMEM;
	return 0;
}
void destroy_gc_caches(void)
{
	kmem_cache_destroy(winode_slab);
}