/*
 * fs/f2fs/segment.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/blkdev.h>

/* constant macro */
#define NULL_SEGNO			((unsigned int)(~0))
#define NULL_SECNO			((unsigned int)(~0))

#define DEF_RECLAIM_PREFREE_SEGMENTS	5	/* 5% over total segments */

/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno)	(segno - free_i->start_segno)
#define GET_R2L_SEGNO(free_i, segno)	(segno + free_i->start_segno)

#define IS_DATASEG(t)	(t <= CURSEG_COLD_DATA)
#define IS_NODESEG(t)	(t >= CURSEG_HOT_NODE)
#define IS_CURSEG(sbi, seg)						\
	((seg == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno) ||	\
	 (seg == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno))

#define IS_CURSEC(sbi, secno)						\
	((secno == CURSEG_I(sbi, CURSEG_HOT_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_DATA)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_HOT_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_WARM_NODE)->segno /		\
	  sbi->segs_per_sec) ||						\
	 (secno == CURSEG_I(sbi, CURSEG_COLD_NODE)->segno /		\
	  sbi->segs_per_sec))

#define START_BLOCK(sbi, segno)						\
	(SM_I(sbi)->seg0_blkaddr +					\
	 (GET_R2L_SEGNO(FREE_I(sbi), segno) << sbi->log_blocks_per_seg))
#define NEXT_FREE_BLKADDR(sbi, curseg)					\
	(START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff)

#define MAIN_BASE_BLOCK(sbi)	(SM_I(sbi)->main_blkaddr)

#define GET_SEGOFF_FROM_SEG0(sbi, blk_addr)				\
	((blk_addr) - SM_I(sbi)->seg0_blkaddr)
#define GET_SEGNO_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) >> sbi->log_blocks_per_seg)
#define GET_BLKOFF_FROM_SEG0(sbi, blk_addr)				\
	(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & (sbi->blocks_per_seg - 1))

#define GET_SEGNO(sbi, blk_addr)					\
	(((blk_addr == NULL_ADDR) || (blk_addr == NEW_ADDR)) ?		\
	NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi),				\
		GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define GET_SECNO(sbi, segno)						\
	((segno) / sbi->segs_per_sec)
#define GET_ZONENO_FROM_SEGNO(sbi, segno)				\
	((segno / sbi->segs_per_sec) / sbi->secs_per_zone)

#define GET_SUM_BLOCK(sbi, segno)					\
	((sbi->sm_info->ssa_blkaddr) + segno)

#define GET_SUM_TYPE(footer) ((footer)->entry_type)
#define SET_SUM_TYPE(footer, type) ((footer)->entry_type = type)

#define SIT_ENTRY_OFFSET(sit_i, segno)					\
	(segno % sit_i->sents_per_block)
#define SIT_BLOCK_OFFSET(sit_i, segno)					\
	(segno / SIT_ENTRY_PER_BLOCK)
#define START_SEGNO(sit_i, segno)					\
	(SIT_BLOCK_OFFSET(sit_i, segno) * SIT_ENTRY_PER_BLOCK)
#define SIT_BLK_CNT(sbi)						\
	((TOTAL_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK)
#define f2fs_bitmap_size(nr)						\
	(BITS_TO_LONGS(nr) * sizeof(unsigned long))
#define TOTAL_SEGS(sbi)	(SM_I(sbi)->main_segments)
#define TOTAL_SECS(sbi)	(sbi->total_sections)

#define SECTOR_FROM_BLOCK(sbi, blk_addr)				\
	(((sector_t)blk_addr) << (sbi)->log_sectors_per_block)
#define SECTOR_TO_BLOCK(sbi, sectors)					\
	(sectors >> (sbi)->log_sectors_per_block)
#define MAX_BIO_BLOCKS(max_hw_blocks)					\
	(min((int)max_hw_blocks, BIO_MAX_PAGES))

/*
 * indicate a block allocation direction: RIGHT and LEFT.
 * RIGHT means allocating new sections towards the end of volume.
 * LEFT means the opposite direction.
 */
enum {
	ALLOC_RIGHT = 0,
	ALLOC_LEFT
};

/*
 * In the victim_sel_policy->alloc_mode, there are two block allocation modes.
 * LFS writes data sequentially with cleaning operations.
 * SSR (Slack Space Recycle) reuses obsolete space without cleaning operations.
 */
enum {
	LFS = 0,
	SSR
};

/*
 * In the victim_sel_policy->gc_mode, there are two GC (i.e., cleaning) modes.
 * GC_CB is based on the cost-benefit algorithm.
 * GC_GREEDY is based on the greedy algorithm.
 */
enum {
	GC_CB = 0,
	GC_GREEDY
};

/*
 * BG_GC means the background cleaning job.
 * FG_GC means the on-demand cleaning job.
 */
enum {
	BG_GC = 0,
	FG_GC
};

/* for a function parameter to select a victim segment */
struct victim_sel_policy {
	int alloc_mode;			/* LFS or SSR */
	int gc_mode;			/* GC_CB or GC_GREEDY */
	unsigned long *dirty_segmap;	/* dirty segment bitmap */
	unsigned int max_search;	/* maximum # of segments to search */
	unsigned int offset;		/* last scanned bitmap offset */
	unsigned int ofs_unit;		/* bitmap search unit */
	unsigned int min_cost;		/* minimum cost */
	unsigned int min_segno;		/* segment # having min. cost */
};

struct seg_entry {
	unsigned short valid_blocks;	/* # of valid blocks */
	unsigned char *cur_valid_map;	/* validity bitmap of blocks */
	/*
	 * # of valid blocks and the validity bitmap stored in the last
	 * checkpoint pack. This information is used by the SSR mode.
	 */
	unsigned short ckpt_valid_blocks;
	unsigned char *ckpt_valid_map;
	unsigned char type;		/* segment type like CURSEG_XXX_TYPE */
	unsigned long long mtime;	/* modification time of the segment */
};

struct sec_entry {
	unsigned int valid_blocks;	/* # of valid blocks in a section */
};

struct segment_allocation {
	void (*allocate_segment)(struct f2fs_sb_info *, int, bool);
};

struct sit_info {
	const struct segment_allocation *s_ops;

	block_t sit_base_addr;		/* start block address of SIT area */
	block_t sit_blocks;		/* # of blocks used by SIT area */
	block_t written_valid_blocks;	/* # of valid blocks in main area */
	char *sit_bitmap;		/* SIT bitmap pointer */
	unsigned int bitmap_size;	/* SIT bitmap size */

	unsigned long *dirty_sentries_bitmap;	/* bitmap for dirty sentries */
	unsigned int dirty_sentries;		/* # of dirty sentries */
	unsigned int sents_per_block;		/* # of SIT entries per block */
	struct mutex sentry_lock;		/* to protect SIT cache */
	struct seg_entry *sentries;		/* SIT segment-level cache */
	struct sec_entry *sec_entries;		/* SIT section-level cache */

	/* for cost-benefit algorithm in cleaning procedure */
	unsigned long long elapsed_time;	/* elapsed time after mount */
	unsigned long long mounted_time;	/* mount time */
	unsigned long long min_mtime;		/* min. modification time */
	unsigned long long max_mtime;		/* max. modification time */
};

struct free_segmap_info {
	unsigned int start_segno;	/* start segment number logically */
	unsigned int free_segments;	/* # of free segments */
	unsigned int free_sections;	/* # of free sections */
	rwlock_t segmap_lock;		/* free segmap lock */
	unsigned long *free_segmap;	/* free segment bitmap */
	unsigned long *free_secmap;	/* free section bitmap */
};

/* Notice: The order of dirty types is the same as CURSEG_XXX in f2fs.h */
enum dirty_type {
	DIRTY_HOT_DATA,		/* dirty segments assigned as hot data logs */
	DIRTY_WARM_DATA,	/* dirty segments assigned as warm data logs */
	DIRTY_COLD_DATA,	/* dirty segments assigned as cold data logs */
	DIRTY_HOT_NODE,		/* dirty segments assigned as hot node logs */
	DIRTY_WARM_NODE,	/* dirty segments assigned as warm node logs */
	DIRTY_COLD_NODE,	/* dirty segments assigned as cold node logs */
	DIRTY,			/* to count # of dirty segments */
	PRE,			/* to count # of entirely obsolete segments */
	NR_DIRTY_TYPE
};

struct dirty_seglist_info {
	const struct victim_selection *v_ops;	/* victim selection operation */
	unsigned long *dirty_segmap[NR_DIRTY_TYPE];
	struct mutex seglist_lock;		/* lock for segment bitmaps */
	int nr_dirty[NR_DIRTY_TYPE];		/* # of dirty segments */
	unsigned long *victim_secmap;		/* background GC victims */
};

/* victim selection function for cleaning and SSR */
struct victim_selection {
	int (*get_victim)(struct f2fs_sb_info *, unsigned int *,
							int, int, char);
};

/* for active log information */
struct curseg_info {
	struct mutex curseg_mutex;		/* lock for consistency */
	struct f2fs_summary_block *sum_blk;	/* cached summary block */
	unsigned char alloc_type;		/* current allocation type */
	unsigned int segno;			/* current segment number */
	unsigned short next_blkoff;		/* next block offset to write */
	unsigned int zone;			/* current zone number */
	unsigned int next_segno;		/* preallocated segment */
};

/*
 * inline functions
 */
static inline struct curseg_info *CURSEG_I(struct f2fs_sb_info *sbi, int type)
{
	return (struct curseg_info *)(SM_I(sbi)->curseg_array + type);
}

static inline struct seg_entry *get_seg_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sentries[segno];
}

static inline struct sec_entry *get_sec_entry(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return &sit_i->sec_entries[GET_SECNO(sbi, segno)];
}

static inline unsigned int get_valid_blocks(struct f2fs_sb_info *sbi,
				unsigned int segno, int section)
{
	/*
	 * In order to get # of valid blocks in a section instantly from many
	 * segments, f2fs manages two counting structures separately.
	 */
	if (section > 1)
		return get_sec_entry(sbi, segno)->valid_blocks;
	else
		return get_seg_entry(sbi, segno)->valid_blocks;
}

static inline void seg_info_from_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	se->valid_blocks = GET_SIT_VBLOCKS(rs);
	se->ckpt_valid_blocks = GET_SIT_VBLOCKS(rs);
	memcpy(se->cur_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->type = GET_SIT_TYPE(rs);
	se->mtime = le64_to_cpu(rs->mtime);
}

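/*
 * Convert an in-memory segment entry back to its on-disk SIT form. Note
 * that this also makes the checkpointed state catch up with the current
 * state: the live validity bitmap and count are copied into the ckpt_*
 * fields.
 */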
static inline void seg_info_to_raw_sit(struct seg_entry *se,
					struct f2fs_sit_entry *rs)
{
	unsigned short raw_vblocks = (se->type << SIT_VBLOCKS_SHIFT) |
					se->valid_blocks;
	rs->vblocks = cpu_to_le16(raw_vblocks);
	memcpy(rs->valid_map, se->cur_valid_map, SIT_VBLOCK_MAP_SIZE);
	memcpy(se->ckpt_valid_map, rs->valid_map, SIT_VBLOCK_MAP_SIZE);
	se->ckpt_valid_blocks = se->valid_blocks;
	rs->mtime = cpu_to_le64(se->mtime);
}

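/*
 * Find the first in-use segment at or after @segno under the read lock;
 * returns @max if every segment from @segno onwards is free. Set bits in
 * free_segmap mean "in use".
 */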
static inline unsigned int find_next_inuse(struct free_segmap_info *free_i,
		unsigned int max, unsigned int segno)
{
	unsigned int ret;
	read_lock(&free_i->segmap_lock);
	ret = find_next_bit(free_i->free_segmap, max, segno);
	read_unlock(&free_i->segmap_lock);
	return ret;
}

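/*
 * Mark @segno as free. If no other segment in its section remains in use,
 * the whole section is marked free as well.
 */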
static inline void __set_free(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	write_lock(&free_i->segmap_lock);
	clear_bit(segno, free_i->free_segmap);
	free_i->free_segments++;

	next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi), start_segno);
	if (next >= start_segno + sbi->segs_per_sec) {
		clear_bit(secno, free_i->free_secmap);
		free_i->free_sections++;
	}
	write_unlock(&free_i->segmap_lock);
}

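/*
 * Mark @segno as in use; if its section was not already marked in use,
 * the free section count is decremented too.
 */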
static inline void __set_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	set_bit(segno, free_i->free_segmap);
	free_i->free_segments--;
	if (!test_and_set_bit(secno, free_i->free_secmap))
		free_i->free_sections--;
}

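/*
 * Test-and-clear variant of __set_free(): the counters are only updated
 * when the segment bit was actually set, so repeated calls are harmless.
 */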
static inline void __set_test_and_free(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	unsigned int start_segno = secno * sbi->segs_per_sec;
	unsigned int next;

	write_lock(&free_i->segmap_lock);
	if (test_and_clear_bit(segno, free_i->free_segmap)) {
		free_i->free_segments++;

		next = find_next_bit(free_i->free_segmap, TOTAL_SEGS(sbi),
								start_segno);
		if (next >= start_segno + sbi->segs_per_sec) {
			if (test_and_clear_bit(secno, free_i->free_secmap))
				free_i->free_sections++;
		}
	}
	write_unlock(&free_i->segmap_lock);
}

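/*
 * Test-and-set variant of __set_inuse(): takes the write lock and only
 * updates the counters when the bits actually change state.
 */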
static inline void __set_test_and_inuse(struct f2fs_sb_info *sbi,
		unsigned int segno)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int secno = segno / sbi->segs_per_sec;
	write_lock(&free_i->segmap_lock);
	if (!test_and_set_bit(segno, free_i->free_segmap)) {
		free_i->free_segments--;
		if (!test_and_set_bit(secno, free_i->free_secmap))
			free_i->free_sections--;
	}
	write_unlock(&free_i->segmap_lock);
}

static inline void get_sit_bitmap(struct f2fs_sb_info *sbi,
		void *dst_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	memcpy(dst_addr, sit_i->sit_bitmap, sit_i->bitmap_size);
}

static inline block_t written_block_count(struct f2fs_sb_info *sbi)
{
	return SIT_I(sbi)->written_valid_blocks;
}

static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_segments;
}

static inline int reserved_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->reserved_segments;
}

static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
{
	return FREE_I(sbi)->free_sections;
}

static inline unsigned int prefree_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[PRE];
}

static inline unsigned int dirty_segments(struct f2fs_sb_info *sbi)
{
	return DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_DATA] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_HOT_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_WARM_NODE] +
		DIRTY_I(sbi)->nr_dirty[DIRTY_COLD_NODE];
}

static inline int overprovision_segments(struct f2fs_sb_info *sbi)
{
	return SM_I(sbi)->ovp_segments;
}

static inline int overprovision_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) overprovision_segments(sbi)) / sbi->segs_per_sec;
}

static inline int reserved_sections(struct f2fs_sb_info *sbi)
{
	return ((unsigned int) reserved_segments(sbi)) / sbi->segs_per_sec;
}

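/*
 * SSR kicks in when the number of free sections, even counting prefree
 * segments that are about to be reclaimed, drops below the
 * over-provisioned area.
 */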
static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
	return (prefree_segments(sbi) / sbi->segs_per_sec)
			+ free_sections(sbi) < overprovision_sections(sbi);
}

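/*
 * Returns true when, even after freeing @freed sections, there would not
 * be enough free sections to absorb the dirty node and dentry pages plus
 * the reserved area. Always false while recovery (por_doing) is running.
 */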
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);

	if (unlikely(sbi->por_doing))
		return false;

	return (free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
						reserved_sections(sbi));
}

static inline bool excess_prefree_segs(struct f2fs_sb_info *sbi)
{
	return prefree_segments(sbi) > SM_I(sbi)->rec_prefree_segments;
}

static inline int utilization(struct f2fs_sb_info *sbi)
{
	return div_u64((u64)valid_user_blocks(sbi) * 100,
					sbi->user_block_count);
}

/*
 * Sometimes it is better for f2fs to drop its out-of-place update policy
 * and update data in place instead. Users can control the policy through
 * sysfs entries. There are five policies with the following triggering
 * conditions.
 * F2FS_IPU_FORCE - all the time,
 * F2FS_IPU_SSR - if SSR mode is activated,
 * F2FS_IPU_UTIL - if FS utilization is over threshold,
 * F2FS_IPU_SSR_UTIL - if SSR mode is activated and FS utilization is over
 *                     threshold,
 * F2FS_IPU_DISABLE - disable IPU. (=default option)
 */
#define DEF_MIN_IPU_UTIL	70

enum {
	F2FS_IPU_FORCE,
	F2FS_IPU_SSR,
	F2FS_IPU_UTIL,
	F2FS_IPU_SSR_UTIL,
	F2FS_IPU_DISABLE,
};

static inline bool need_inplace_update(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);

	/* IPU can be done only for the user data */
	if (S_ISDIR(inode->i_mode))
		return false;

	switch (SM_I(sbi)->ipu_policy) {
	case F2FS_IPU_FORCE:
		return true;
	case F2FS_IPU_SSR:
		if (need_SSR(sbi))
			return true;
		break;
	case F2FS_IPU_UTIL:
		if (utilization(sbi) > SM_I(sbi)->min_ipu_util)
			return true;
		break;
	case F2FS_IPU_SSR_UTIL:
		if (need_SSR(sbi) && utilization(sbi) > SM_I(sbi)->min_ipu_util)
			return true;
		break;
	case F2FS_IPU_DISABLE:
		break;
	}
	return false;
}

static inline unsigned int curseg_segno(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->segno;
}

static inline unsigned char curseg_alloc_type(struct f2fs_sb_info *sbi,
		int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->alloc_type;
}

static inline unsigned short curseg_blkoff(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	return curseg->next_blkoff;
}

#ifdef CONFIG_F2FS_CHECK_FS
static inline void check_seg_range(struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int end_segno = SM_I(sbi)->segment_count - 1;
	BUG_ON(segno > end_segno);
}

static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	block_t total_blks = sm_info->segment_count << sbi->log_blocks_per_seg;
	block_t start_addr = sm_info->seg0_blkaddr;
	block_t end_addr = start_addr + total_blks - 1;
	BUG_ON(blk_addr < start_addr);
	BUG_ON(blk_addr > end_addr);
}

/*
 * Summary block is always treated as an invalid block
 */
static inline void check_block_count(struct f2fs_sb_info *sbi,
		int segno, struct f2fs_sit_entry *raw_sit)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	unsigned int end_segno = sm_info->segment_count - 1;
	bool is_valid = test_bit_le(0, raw_sit->valid_map) ? true : false;
	int valid_blocks = 0;
	int cur_pos = 0, next_pos;

	/* check segment usage */
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg);

	/* check boundary of a given segment number */
	BUG_ON(segno > end_segno);

	/* check bitmap with valid block count */
	do {
		if (is_valid) {
			next_pos = find_next_zero_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
			valid_blocks += next_pos - cur_pos;
		} else
			next_pos = find_next_bit_le(&raw_sit->valid_map,
					sbi->blocks_per_seg,
					cur_pos);
		cur_pos = next_pos;
		is_valid = !is_valid;
	} while (cur_pos < sbi->blocks_per_seg);
	BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
}
#else
#define check_seg_range(sbi, segno)
#define verify_block_addr(sbi, blk_addr)
#define check_block_count(sbi, segno, raw_sit)
#endif

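/*
 * The SIT area keeps two copies of every SIT block and ping-pongs between
 * them; sit_bitmap records which copy is currently valid. current_sit_addr()
 * returns the live copy, next_sit_addr() the alternate one, and
 * set_to_next_sit() flips the bit once the block has been rewritten.
 */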
static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
						unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int offset = SIT_BLOCK_OFFSET(sit_i, start);
	block_t blk_addr = sit_i->sit_base_addr + offset;

	check_seg_range(sbi, start);

	/* calculate sit block address */
	if (f2fs_test_bit(offset, sit_i->sit_bitmap))
		blk_addr += sit_i->sit_blocks;

	return blk_addr;
}

static inline pgoff_t next_sit_addr(struct f2fs_sb_info *sbi,
						pgoff_t block_addr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	block_addr -= sit_i->sit_base_addr;
	if (block_addr < sit_i->sit_blocks)
		block_addr += sit_i->sit_blocks;
	else
		block_addr -= sit_i->sit_blocks;

	return block_addr + sit_i->sit_base_addr;
}

static inline void set_to_next_sit(struct sit_info *sit_i, unsigned int start)
{
	unsigned int block_off = SIT_BLOCK_OFFSET(sit_i, start);

	if (f2fs_test_bit(block_off, sit_i->sit_bitmap))
		f2fs_clear_bit(block_off, sit_i->sit_bitmap);
	else
		f2fs_set_bit(block_off, sit_i->sit_bitmap);
}

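/*
 * Total seconds the filesystem has been mounted; elapsed_time carries the
 * value accumulated across previous mounts via the checkpoint.
 */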
static inline unsigned long long get_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	return sit_i->elapsed_time + CURRENT_TIME_SEC.tv_sec -
						sit_i->mounted_time;
}

static inline void set_summary(struct f2fs_summary *sum, nid_t nid,
			unsigned int ofs_in_node, unsigned char version)
{
	sum->nid = cpu_to_le32(nid);
	sum->ofs_in_node = cpu_to_le16(ofs_in_node);
	sum->version = version;
}

static inline block_t start_sum_block(struct f2fs_sb_info *sbi)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_start_sum);
}

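/*
 * Address of the summary block for log @type: @base + 1 blocks back from
 * the end of the checkpoint pack, then advanced by @type.
 */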
static inline block_t sum_blk_addr(struct f2fs_sb_info *sbi, int base, int type)
{
	return __start_cp_addr(sbi) +
		le32_to_cpu(F2FS_CKPT(sbi)->cp_pack_total_block_count)
				- (base + 1) + type;
}

static inline bool sec_usage_check(struct f2fs_sb_info *sbi, unsigned int secno)
{
	if (IS_CURSEC(sbi, secno) || (sbi->cur_victim_sec == secno))
		return true;
	return false;
}

static inline unsigned int max_hw_blocks(struct f2fs_sb_info *sbi)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	return SECTOR_TO_BLOCK(sbi, queue_max_sectors(q));
}

/*
 * It is very important to gather dirty pages and write them at once, so that
 * we can submit a big bio without interfering with other data writes.
 * By default, 512 pages for directory data,
 * 512 pages (2MB) * 3 for three types of nodes, and
 * max_bio_blocks for meta are set.
 */
static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
{
	if (type == DATA)
		return sbi->blocks_per_seg;
	else if (type == NODE)
		return 3 * sbi->blocks_per_seg;
	else if (type == META)
		return MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	else
		return 0;
}

/*
 * When writing pages, it is better to align nr_to_write to the segment size.
 */
static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
					struct writeback_control *wbc)
{
	long nr_to_write, desired;

	if (wbc->sync_mode != WB_SYNC_NONE)
		return 0;

	nr_to_write = wbc->nr_to_write;

	if (type == DATA)
		desired = 4096;
	else if (type == NODE)
		desired = 3 * max_hw_blocks(sbi);
	else
		desired = MAX_BIO_BLOCKS(max_hw_blocks(sbi));

	wbc->nr_to_write = desired;
	return desired - nr_to_write;
}