Commit | Line | Data |
---|---|---|
8f6e39a7 MC |
1 | /* |
2 | * fs/ext4/mballoc.h | |
3 | * | |
4 | * Written by: Alex Tomas <alex@clusterfs.com> | |
5 | * | |
6 | */ | |
7 | #ifndef _EXT4_MBALLOC_H | |
8 | #define _EXT4_MBALLOC_H | |
9 | ||
10 | #include <linux/time.h> | |
11 | #include <linux/fs.h> | |
12 | #include <linux/namei.h> | |
13 | #include <linux/quotaops.h> | |
14 | #include <linux/buffer_head.h> | |
15 | #include <linux/module.h> | |
16 | #include <linux/swap.h> | |
17 | #include <linux/proc_fs.h> | |
18 | #include <linux/pagemap.h> | |
19 | #include <linux/seq_file.h> | |
20 | #include <linux/version.h> | |
8a0aba73 TT |
21 | #include <linux/blkdev.h> |
22 | #include <linux/marker.h> | |
920313a7 | 23 | #include <linux/mutex.h> |
8f6e39a7 MC |
24 | #include "ext4_jbd2.h" |
25 | #include "ext4.h" | |
26 | #include "group.h" | |
27 | ||
28 | /* | |
29 | * with AGGRESSIVE_CHECK allocator runs consistency checks over | |
30 | * structures. these checks slow things down a lot | |
31 | */ | |
32 | #define AGGRESSIVE_CHECK__ | |
33 | ||
34 | /* | |
35 | * with DOUBLE_CHECK defined mballoc creates persistent in-core | |
36 | * bitmaps, maintains and uses them to check for double allocations | |
37 | */ | |
38 | #define DOUBLE_CHECK__ | |
39 | ||
40 | /* | |
41 | */ | |
42 | #define MB_DEBUG__ | |
43 | #ifdef MB_DEBUG | |
44 | #define mb_debug(fmt, a...) printk(fmt, ##a) | |
45 | #else | |
46 | #define mb_debug(fmt, a...) | |
47 | #endif | |
48 | ||
49 | /* | |
50 | * with EXT4_MB_HISTORY mballoc stores last N allocations in memory | |
51 | * and you can monitor it in /proc/fs/ext4/<dev>/mb_history | |
52 | */ | |
53 | #define EXT4_MB_HISTORY | |
54 | #define EXT4_MB_HISTORY_ALLOC 1 /* allocation */ | |
55 | #define EXT4_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */ | |
56 | #define EXT4_MB_HISTORY_DISCARD 4 /* preallocation discarded */ | |
57 | #define EXT4_MB_HISTORY_FREE 8 /* free */ | |
58 | ||
59 | #define EXT4_MB_HISTORY_DEFAULT (EXT4_MB_HISTORY_ALLOC | \ | |
60 | EXT4_MB_HISTORY_PREALLOC) | |
61 | ||
62 | /* | |
63 | * How long mballoc can look for a best extent (in found extents) | |
64 | */ | |
65 | #define MB_DEFAULT_MAX_TO_SCAN 200 | |
66 | ||
67 | /* | |
68 | * How long mballoc must look for a best extent | |
69 | */ | |
70 | #define MB_DEFAULT_MIN_TO_SCAN 10 | |
71 | ||
72 | /* | |
73 | * How many groups mballoc will scan looking for the best chunk | |
74 | */ | |
75 | #define MB_DEFAULT_MAX_GROUPS_TO_SCAN 5 | |
76 | ||
77 | /* | |
78 | * with 'ext4_mb_stats' allocator will collect stats that will be | |
79 | * shown at umount. The collecting costs though! | |
80 | */ | |
81 | #define MB_DEFAULT_STATS 1 | |
82 | ||
83 | /* | |
84 | * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served | |
85 | * by the stream allocator, which purpose is to pack requests | |
86 | * as close each to other as possible to produce smooth I/O traffic | |
87 | * We use locality group prealloc space for stream request. | |
88 | * We can tune the same via /proc/fs/ext4/<partition>/stream_req | |
89 | */ | |
90 | #define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */ | |
91 | ||
92 | /* | |
93 | * for which requests use 2^N search using buddies | |
94 | */ | |
95 | #define MB_DEFAULT_ORDER2_REQS 2 | |
96 | ||
97 | /* | |
98 | * default group prealloc size 512 blocks | |
99 | */ | |
100 | #define MB_DEFAULT_GROUP_PREALLOC 512 | |
101 | ||
102 | static struct kmem_cache *ext4_pspace_cachep; | |
103 | static struct kmem_cache *ext4_ac_cachep; | |
c894058d | 104 | static struct kmem_cache *ext4_free_ext_cachep; |
8f6e39a7 | 105 | |
c894058d AK |
106 | struct ext4_free_data { |
107 | /* this links the free block information from group_info */ | |
108 | struct rb_node node; | |
8f6e39a7 | 109 | |
c894058d | 110 | /* this links the free block information from ext4_sb_info */ |
8f6e39a7 | 111 | struct list_head list; |
c894058d AK |
112 | |
113 | /* group which free block extent belongs */ | |
114 | ext4_group_t group; | |
115 | ||
116 | /* free block extent */ | |
117 | ext4_grpblk_t start_blk; | |
118 | ext4_grpblk_t count; | |
119 | ||
120 | /* transaction which freed this extent */ | |
121 | tid_t t_tid; | |
8f6e39a7 MC |
122 | }; |
123 | ||
124 | struct ext4_group_info { | |
125 | unsigned long bb_state; | |
c894058d | 126 | struct rb_root bb_free_root; |
8f6e39a7 MC |
127 | unsigned short bb_first_free; |
128 | unsigned short bb_free; | |
129 | unsigned short bb_fragments; | |
130 | struct list_head bb_prealloc_list; | |
131 | #ifdef DOUBLE_CHECK | |
132 | void *bb_bitmap; | |
133 | #endif | |
920313a7 | 134 | struct rw_semaphore alloc_sem; |
8f6e39a7 MC |
135 | unsigned short bb_counters[]; |
136 | }; | |
137 | ||
138 | #define EXT4_GROUP_INFO_NEED_INIT_BIT 0 | |
139 | #define EXT4_GROUP_INFO_LOCKED_BIT 1 | |
140 | ||
141 | #define EXT4_MB_GRP_NEED_INIT(grp) \ | |
142 | (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state))) | |
143 | ||
144 | ||
145 | struct ext4_prealloc_space { | |
146 | struct list_head pa_inode_list; | |
147 | struct list_head pa_group_list; | |
148 | union { | |
149 | struct list_head pa_tmp_list; | |
150 | struct rcu_head pa_rcu; | |
151 | } u; | |
152 | spinlock_t pa_lock; | |
153 | atomic_t pa_count; | |
154 | unsigned pa_deleted; | |
155 | ext4_fsblk_t pa_pstart; /* phys. block */ | |
156 | ext4_lblk_t pa_lstart; /* log. block */ | |
157 | unsigned short pa_len; /* len of preallocated chunk */ | |
158 | unsigned short pa_free; /* how many blocks are free */ | |
159 | unsigned short pa_linear; /* consumed in one direction | |
160 | * strictly, for grp prealloc */ | |
161 | spinlock_t *pa_obj_lock; | |
162 | struct inode *pa_inode; /* hack, for history only */ | |
163 | }; | |
164 | ||
165 | ||
166 | struct ext4_free_extent { | |
167 | ext4_lblk_t fe_logical; | |
168 | ext4_grpblk_t fe_start; | |
169 | ext4_group_t fe_group; | |
170 | int fe_len; | |
171 | }; | |
172 | ||
173 | /* | |
174 | * Locality group: | |
175 | * we try to group all related changes together | |
176 | * so that writeback can flush/allocate them together as well | |
6be2ded1 AK |
177 | * Size of lg_prealloc_list hash is determined by MB_DEFAULT_GROUP_PREALLOC |
178 | * (512). We store prealloc space into the hash based on the pa_free blocks | |
179 | order value, i.e. fls(pa_free) - 1. | |
8f6e39a7 | 180 | */ |
6be2ded1 | 181 | #define PREALLOC_TB_SIZE 10 |
8f6e39a7 MC |
182 | struct ext4_locality_group { |
183 | /* for allocator */ | |
6be2ded1 AK |
184 | /* to serialize allocates */ |
185 | struct mutex lg_mutex; | |
186 | /* list of preallocations */ | |
187 | struct list_head lg_prealloc_list[PREALLOC_TB_SIZE]; | |
8f6e39a7 MC |
188 | spinlock_t lg_prealloc_lock; |
189 | }; | |
190 | ||
191 | struct ext4_allocation_context { | |
192 | struct inode *ac_inode; | |
193 | struct super_block *ac_sb; | |
194 | ||
195 | /* original request */ | |
196 | struct ext4_free_extent ac_o_ex; | |
197 | ||
198 | /* goal request (after normalization) */ | |
199 | struct ext4_free_extent ac_g_ex; | |
200 | ||
201 | /* the best found extent */ | |
202 | struct ext4_free_extent ac_b_ex; | |
203 | ||
204 | /* copy of the bext found extent taken before preallocation efforts */ | |
205 | struct ext4_free_extent ac_f_ex; | |
206 | ||
207 | /* number of iterations done. we have to track to limit searching */ | |
208 | unsigned long ac_ex_scanned; | |
209 | __u16 ac_groups_scanned; | |
210 | __u16 ac_found; | |
211 | __u16 ac_tail; | |
212 | __u16 ac_buddy; | |
213 | __u16 ac_flags; /* allocation hints */ | |
214 | __u8 ac_status; | |
215 | __u8 ac_criteria; | |
216 | __u8 ac_repeats; | |
217 | __u8 ac_2order; /* if request is to allocate 2^N blocks and | |
218 | * N > 0, the field stores N, otherwise 0 */ | |
219 | __u8 ac_op; /* operation, for history only */ | |
220 | struct page *ac_bitmap_page; | |
221 | struct page *ac_buddy_page; | |
222 | struct ext4_prealloc_space *ac_pa; | |
223 | struct ext4_locality_group *ac_lg; | |
224 | }; | |
225 | ||
226 | #define AC_STATUS_CONTINUE 1 | |
227 | #define AC_STATUS_FOUND 2 | |
228 | #define AC_STATUS_BREAK 3 | |
229 | ||
230 | struct ext4_mb_history { | |
231 | struct ext4_free_extent orig; /* orig allocation */ | |
232 | struct ext4_free_extent goal; /* goal allocation */ | |
233 | struct ext4_free_extent result; /* result allocation */ | |
234 | unsigned pid; | |
235 | unsigned ino; | |
236 | __u16 found; /* how many extents have been found */ | |
237 | __u16 groups; /* how many groups have been scanned */ | |
238 | __u16 tail; /* what tail broke some buddy */ | |
239 | __u16 buddy; /* buddy the tail ^^^ broke */ | |
240 | __u16 flags; | |
241 | __u8 cr:3; /* which phase the result extent was found at */ | |
242 | __u8 op:4; | |
243 | __u8 merged:1; | |
244 | }; | |
245 | ||
246 | struct ext4_buddy { | |
247 | struct page *bd_buddy_page; | |
248 | void *bd_buddy; | |
249 | struct page *bd_bitmap_page; | |
250 | void *bd_bitmap; | |
251 | struct ext4_group_info *bd_info; | |
252 | struct super_block *bd_sb; | |
253 | __u16 bd_blkbits; | |
254 | ext4_group_t bd_group; | |
920313a7 | 255 | struct rw_semaphore *alloc_semp; |
8f6e39a7 MC |
256 | }; |
257 | #define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) | |
258 | #define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) | |
259 | ||
#ifndef EXT4_MB_HISTORY
/* History support compiled out: recording becomes a no-op. */
static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
{
}
#else
static void ext4_mb_store_history(struct ext4_allocation_context *ac);
#endif
268 | ||
269 | #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1) | |
270 | ||
8f6e39a7 MC |
271 | struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t); |
272 | ||
273 | static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, | |
274 | ext4_group_t group); | |
8f6e39a7 MC |
275 | static void ext4_mb_return_to_preallocation(struct inode *inode, |
276 | struct ext4_buddy *e4b, sector_t block, | |
277 | int count); | |
278 | static void ext4_mb_put_pa(struct ext4_allocation_context *, | |
279 | struct super_block *, struct ext4_prealloc_space *pa); | |
280 | static int ext4_mb_init_per_dev_proc(struct super_block *sb); | |
281 | static int ext4_mb_destroy_per_dev_proc(struct super_block *sb); | |
3e624fc7 | 282 | static void release_blocks_on_commit(journal_t *journal, transaction_t *txn); |
8f6e39a7 MC |
283 | |
284 | ||
285 | static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group) | |
286 | { | |
287 | struct ext4_group_info *grinfo = ext4_get_group_info(sb, group); | |
288 | ||
289 | bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state)); | |
290 | } | |
291 | ||
292 | static inline void ext4_unlock_group(struct super_block *sb, | |
293 | ext4_group_t group) | |
294 | { | |
295 | struct ext4_group_info *grinfo = ext4_get_group_info(sb, group); | |
296 | ||
297 | bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state)); | |
298 | } | |
299 | ||
300 | static inline int ext4_is_group_locked(struct super_block *sb, | |
301 | ext4_group_t group) | |
302 | { | |
303 | struct ext4_group_info *grinfo = ext4_get_group_info(sb, group); | |
304 | ||
305 | return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT, | |
306 | &(grinfo->bb_state)); | |
307 | } | |
308 | ||
309 | static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb, | |
310 | struct ext4_free_extent *fex) | |
311 | { | |
312 | ext4_fsblk_t block; | |
313 | ||
314 | block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb) | |
315 | + fex->fe_start | |
316 | + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | |
317 | return block; | |
318 | } | |
319 | #endif |