fs/f2fs/shrinker.c

/*
 * f2fs shrinker support
 *   the basic infra was copied from fs/ubifs/shrinker.c
 *
 * Copyright (c) 2015 Motorola Mobility
 * Copyright (c) 2015 Jaegeuk Kim <jaegeuk@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
static unsigned int shrinker_run_no;

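/* number of clean (reclaimable) NAT cache entries: cached minus dirty */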
static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
{
	return NM_I(sbi)->nat_cnt - NM_I(sbi)->dirty_nat_cnt;
}

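/*
 * reclaimable extent cache objects: one per extent tree plus one per
 * cached extent node
 */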
static unsigned long __count_extent_cache(struct f2fs_sb_info *sbi)
{
	return sbi->total_ext_tree + atomic_read(&sbi->total_ext_node);
}

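/*
 * Tell the VM how many objects f2fs could free: walk every mounted f2fs
 * instance on f2fs_list and add up its extent cache entries and clean NAT
 * cache entries, skipping superblocks that are busy in f2fs_put_super().
 */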
unsigned long f2fs_shrink_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned long count = 0;

	spin_lock(&f2fs_list_lock);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		/*
		 * umount_mutex pins this sbi, so the list lock can be
		 * dropped while its caches are counted.
		 */
		spin_unlock(&f2fs_list_lock);

		/* count extent cache entries */
		count += __count_extent_cache(sbi);

		/* count clean nat cache entries */
		count += __count_nat_entries(sbi);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		mutex_unlock(&sbi->umount_mutex);
	}
	spin_unlock(&f2fs_list_lock);
	return count;
}

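/*
 * Try to free up to sc->nr_to_scan objects.  Each invocation draws a fresh
 * non-zero run number and stops as soon as it meets a superblock already
 * visited under that number, so one pass touches each sb at most once.
 * Up to half of the budget goes to the extent cache, the remainder to
 * clean NAT cache entries.
 */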
unsigned long f2fs_shrink_scan(struct shrinker *shrink,
				struct shrink_control *sc)
{
	unsigned long nr = sc->nr_to_scan;
	struct f2fs_sb_info *sbi;
	struct list_head *p;
	unsigned int run_no;
	unsigned long freed = 0;

	spin_lock(&f2fs_list_lock);
	/* generate a unique, non-zero run number for this pass */
	do {
		run_no = ++shrinker_run_no;
	} while (run_no == 0);
	p = f2fs_list.next;
	while (p != &f2fs_list) {
		sbi = list_entry(p, struct f2fs_sb_info, s_list);

		if (sbi->shrinker_run_no == run_no)
			break;

		/* stop f2fs_put_super */
		if (!mutex_trylock(&sbi->umount_mutex)) {
			p = p->next;
			continue;
		}
		spin_unlock(&f2fs_list_lock);

		sbi->shrinker_run_no = run_no;

		/* shrink extent cache entries */
		freed += f2fs_shrink_extent_tree(sbi, nr >> 1);

		/* shrink clean nat cache entries */
		if (freed < nr)
			freed += try_to_free_nats(sbi, nr - freed);

		spin_lock(&f2fs_list_lock);
		p = p->next;
		/* rotate this sb to the tail so the next pass starts elsewhere */
		list_move_tail(&sbi->s_list, &f2fs_list);
		mutex_unlock(&sbi->umount_mutex);
		if (freed >= nr)
			break;
	}
	spin_unlock(&f2fs_list_lock);
	return freed;
}

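/* Link a superblock onto f2fs_list so the shrinker will see it. */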
void f2fs_join_shrinker(struct f2fs_sb_info *sbi)
{
	spin_lock(&f2fs_list_lock);
	list_add_tail(&sbi->s_list, &f2fs_list);
	spin_unlock(&f2fs_list_lock);
}

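/*
 * Unhook a superblock from the shrinker: release all of its extent cache
 * entries first, then take it off f2fs_list.
 */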
void f2fs_leave_shrinker(struct f2fs_sb_info *sbi)
{
	f2fs_shrink_extent_tree(sbi, __count_extent_cache(sbi));

	spin_lock(&f2fs_list_lock);
	list_del(&sbi->s_list);
	spin_unlock(&f2fs_list_lock);
}