4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com/
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
14 #include <linux/types.h>
15 #include <linux/page-flags.h>
16 #include <linux/buffer_head.h>
17 #include <linux/slab.h>
18 #include <linux/crc32.h>
19 #include <linux/magic.h>
20 #include <linux/kobject.h>
25 #define F2FS_MOUNT_BG_GC 0x00000001
26 #define F2FS_MOUNT_DISABLE_ROLL_FORWARD 0x00000002
27 #define F2FS_MOUNT_DISCARD 0x00000004
28 #define F2FS_MOUNT_NOHEAP 0x00000008
29 #define F2FS_MOUNT_XATTR_USER 0x00000010
30 #define F2FS_MOUNT_POSIX_ACL 0x00000020
31 #define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
33 #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
34 #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
35 #define test_opt(sbi, option) (sbi->mount_opt.opt & F2FS_MOUNT_##option)
37 #define ver_after(a, b) (typecheck(unsigned long long, a) && \
38 typecheck(unsigned long long, b) && \
39 ((long long)((a) - (b)) > 0))
41 typedef u32 block_t
; /*
42 * should not change u32, since it is the on-disk block
43 * address format, __le32.
/* holds the mount-time option bitmap manipulated by set_opt/clear_opt */
struct f2fs_mount_info {
	unsigned int	opt;	/* F2FS_MOUNT_* flag bits */
};
51 #define CRCPOLY_LE 0xedb88320
53 static inline __u32
f2fs_crc32(void *buf
, size_t len
)
55 unsigned char *p
= (unsigned char *)buf
;
56 __u32 crc
= F2FS_SUPER_MAGIC
;
61 for (i
= 0; i
< 8; i
++)
62 crc
= (crc
>> 1) ^ ((crc
& 1) ? CRCPOLY_LE
: 0);
67 static inline bool f2fs_crc_valid(__u32 blk_crc
, void *buf
, size_t buf_size
)
69 return f2fs_crc32(buf
, buf_size
) == blk_crc
;
73 * For checkpoint manager
80 /* for the list of orphan inodes */
81 struct orphan_inode_entry
{
82 struct list_head list
; /* list head */
83 nid_t ino
; /* inode number */
86 /* for the list of directory inodes */
87 struct dir_inode_entry
{
88 struct list_head list
; /* list head */
89 struct inode
*inode
; /* vfs inode pointer */
92 /* for the list of fsync inodes, used only during recovery */
93 struct fsync_inode_entry
{
94 struct list_head list
; /* list head */
95 struct inode
*inode
; /* vfs inode pointer */
96 block_t blkaddr
; /* block address locating the last inode */
99 #define nats_in_cursum(sum) (le16_to_cpu(sum->n_nats))
100 #define sits_in_cursum(sum) (le16_to_cpu(sum->n_sits))
102 #define nat_in_journal(sum, i) (sum->nat_j.entries[i].ne)
103 #define nid_in_journal(sum, i) (sum->nat_j.entries[i].nid)
104 #define sit_in_journal(sum, i) (sum->sit_j.entries[i].se)
105 #define segno_in_journal(sum, i) (sum->sit_j.entries[i].segno)
107 static inline int update_nats_in_cursum(struct f2fs_summary_block
*rs
, int i
)
109 int before
= nats_in_cursum(rs
);
110 rs
->n_nats
= cpu_to_le16(before
+ i
);
114 static inline int update_sits_in_cursum(struct f2fs_summary_block
*rs
, int i
)
116 int before
= sits_in_cursum(rs
);
117 rs
->n_sits
= cpu_to_le16(before
+ i
);
124 #define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS
125 #define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS
127 #if defined(__KERNEL__) && defined(CONFIG_COMPAT)
129 * ioctl commands in 32 bit emulation
131 #define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS
132 #define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS
136 * For INODE and NODE manager
139 * XATTR_NODE_OFFSET stores xattrs to one node block per file keeping -1
140 * as its node offset to distinguish from index node blocks.
141 * But some bits are used to mark the node block.
143 #define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
146 ALLOC_NODE
, /* allocate a new node page if needed */
147 LOOKUP_NODE
, /* look up a node without readahead */
149 * look up a node with readahead called
150 * by get_datablock_ro.
154 #define F2FS_LINK_MAX 32000 /* maximum link count per file */
156 /* for in-memory extent cache entry */
158 rwlock_t ext_lock
; /* rwlock for consistency */
159 unsigned int fofs
; /* start offset in a file */
160 u32 blk_addr
; /* start block address of the extent */
161 unsigned int len
; /* length of the extent */
165 * i_advise uses FADVISE_XXX_BIT. We can add additional hints later.
167 #define FADVISE_COLD_BIT 0x01
168 #define FADVISE_LOST_PINO_BIT 0x02
170 struct f2fs_inode_info
{
171 struct inode vfs_inode
; /* serve a vfs inode */
172 unsigned long i_flags
; /* keep an inode flags for ioctl */
173 unsigned char i_advise
; /* use to give file attribute hints */
174 unsigned int i_current_depth
; /* use only in directory structure */
175 unsigned int i_pino
; /* parent inode number */
176 umode_t i_acl_mode
; /* keep file acl mode temporarily */
178 /* Use below internally in f2fs*/
179 unsigned long flags
; /* use to pass per-file flags */
180 atomic_t dirty_dents
; /* # of dirty dentry pages */
181 f2fs_hash_t chash
; /* hash value of given file name */
182 unsigned int clevel
; /* maximum level of given file name */
183 nid_t i_xattr_nid
; /* node id that contains xattrs */
184 struct extent_info ext
; /* in-memory extent cache entry */
187 static inline void get_extent_info(struct extent_info
*ext
,
188 struct f2fs_extent i_ext
)
190 write_lock(&ext
->ext_lock
);
191 ext
->fofs
= le32_to_cpu(i_ext
.fofs
);
192 ext
->blk_addr
= le32_to_cpu(i_ext
.blk_addr
);
193 ext
->len
= le32_to_cpu(i_ext
.len
);
194 write_unlock(&ext
->ext_lock
);
197 static inline void set_raw_extent(struct extent_info
*ext
,
198 struct f2fs_extent
*i_ext
)
200 read_lock(&ext
->ext_lock
);
201 i_ext
->fofs
= cpu_to_le32(ext
->fofs
);
202 i_ext
->blk_addr
= cpu_to_le32(ext
->blk_addr
);
203 i_ext
->len
= cpu_to_le32(ext
->len
);
204 read_unlock(&ext
->ext_lock
);
207 struct f2fs_nm_info
{
208 block_t nat_blkaddr
; /* base disk address of NAT */
209 nid_t max_nid
; /* maximum possible node ids */
210 nid_t next_scan_nid
; /* the next nid to be scanned */
212 /* NAT cache management */
213 struct radix_tree_root nat_root
;/* root of the nat entry cache */
214 rwlock_t nat_tree_lock
; /* protect nat_tree_lock */
215 unsigned int nat_cnt
; /* the # of cached nat entries */
216 struct list_head nat_entries
; /* cached nat entry list (clean) */
217 struct list_head dirty_nat_entries
; /* cached nat entry list (dirty) */
219 /* free node ids management */
220 struct list_head free_nid_list
; /* a list for free nids */
221 spinlock_t free_nid_list_lock
; /* protect free nid list */
222 unsigned int fcnt
; /* the number of free node id */
223 struct mutex build_lock
; /* lock for build free nids */
226 char *nat_bitmap
; /* NAT bitmap pointer */
227 int bitmap_size
; /* bitmap size */
231 * this structure is used as one of function parameters.
232 * all the information are dedicated to a given direct node block determined
233 * by the data offset in a file.
235 struct dnode_of_data
{
236 struct inode
*inode
; /* vfs inode pointer */
237 struct page
*inode_page
; /* its inode page, NULL is possible */
238 struct page
*node_page
; /* cached direct node page */
239 nid_t nid
; /* node id of the direct node block */
240 unsigned int ofs_in_node
; /* data offset in the node page */
241 bool inode_page_locked
; /* inode page is locked or not */
242 block_t data_blkaddr
; /* block address of the node block */
245 static inline void set_new_dnode(struct dnode_of_data
*dn
, struct inode
*inode
,
246 struct page
*ipage
, struct page
*npage
, nid_t nid
)
248 memset(dn
, 0, sizeof(*dn
));
250 dn
->inode_page
= ipage
;
251 dn
->node_page
= npage
;
258 * By default, there are 6 active log areas across the whole main area.
259 * When considering hot and cold data separation to reduce cleaning overhead,
260 * we split 3 for data logs and 3 for node logs as hot, warm, and cold types,
262 * In the current design, you should not change the numbers intentionally.
263 * Instead, as a mount option such as active_logs=x, you can use 2, 4, and 6
264 * logs individually according to the underlying devices. (default: 6)
265 * Just in case, on-disk layout covers maximum 16 logs that consist of 8 for
266 * data and 8 for node logs.
268 #define NR_CURSEG_DATA_TYPE (3)
269 #define NR_CURSEG_NODE_TYPE (3)
270 #define NR_CURSEG_TYPE (NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
enum {
	CURSEG_HOT_DATA	= 0,	/* directory entry blocks */
	CURSEG_WARM_DATA,	/* data blocks */
	CURSEG_COLD_DATA,	/* multimedia or GCed data blocks */
	CURSEG_HOT_NODE,	/* direct node blocks of directory files */
	CURSEG_WARM_NODE,	/* direct node blocks of normal files */
	CURSEG_COLD_NODE,	/* indirect node blocks */
	NO_CHECK_TYPE
};
282 struct f2fs_sm_info
{
283 struct sit_info
*sit_info
; /* whole segment information */
284 struct free_segmap_info
*free_info
; /* free segment information */
285 struct dirty_seglist_info
*dirty_info
; /* dirty segment information */
286 struct curseg_info
*curseg_array
; /* active segment information */
288 struct list_head wblist_head
; /* list of under-writeback pages */
289 spinlock_t wblist_lock
; /* lock for checkpoint */
291 block_t seg0_blkaddr
; /* block address of 0'th segment */
292 block_t main_blkaddr
; /* start block address of main area */
293 block_t ssa_blkaddr
; /* start block address of SSA area */
295 unsigned int segment_count
; /* total # of segments */
296 unsigned int main_segments
; /* # of segments in main area */
297 unsigned int reserved_segments
; /* # of reserved segments */
298 unsigned int ovp_segments
; /* # of overprovision segments */
302 * For directory operation
304 #define NODE_DIR1_BLOCK (ADDRS_PER_INODE + 1)
305 #define NODE_DIR2_BLOCK (ADDRS_PER_INODE + 2)
306 #define NODE_IND1_BLOCK (ADDRS_PER_INODE + 3)
307 #define NODE_IND2_BLOCK (ADDRS_PER_INODE + 4)
308 #define NODE_DIND_BLOCK (ADDRS_PER_INODE + 5)
314 * COUNT_TYPE for monitoring
316 * f2fs monitors the number of several block types such as on-writeback,
317 * dirty dentry blocks, dirty node blocks, and dirty meta blocks.
328 * Uses as sbi->fs_lock[NR_GLOBAL_LOCKS].
329 * The checkpoint procedure blocks all the locks in this fs_lock array.
330 * Some FS operations grab free locks, and if there is no free lock,
331 * then wait to grab a lock in a round-robin manner.
333 #define NR_GLOBAL_LOCKS 8
336 * The below are the page types of bios used in submti_bio().
337 * The available types are:
338 * DATA User data pages. It operates as async mode.
339 * NODE Node pages. It operates as async mode.
340 * META FS metadata pages such as SIT, NAT, CP.
341 * NR_PAGE_TYPE The number of page types.
342 * META_FLUSH Make sure the previous pages are written
343 * with waiting the bio's completion
344 * ... Only can be used with META.
354 struct f2fs_sb_info
{
355 struct super_block
*sb
; /* pointer to VFS super block */
356 struct proc_dir_entry
*s_proc
; /* proc entry */
357 struct buffer_head
*raw_super_buf
; /* buffer head of raw sb */
358 struct f2fs_super_block
*raw_super
; /* raw super block pointer */
359 int s_dirty
; /* dirty flag for checkpoint */
361 /* for node-related operations */
362 struct f2fs_nm_info
*nm_info
; /* node manager */
363 struct inode
*node_inode
; /* cache node blocks */
365 /* for segment-related operations */
366 struct f2fs_sm_info
*sm_info
; /* segment manager */
367 struct bio
*bio
[NR_PAGE_TYPE
]; /* bios to merge */
368 sector_t last_block_in_bio
[NR_PAGE_TYPE
]; /* last block number */
369 struct rw_semaphore bio_sem
; /* IO semaphore */
372 struct f2fs_checkpoint
*ckpt
; /* raw checkpoint pointer */
373 struct inode
*meta_inode
; /* cache meta blocks */
374 struct mutex cp_mutex
; /* checkpoint procedure lock */
375 struct mutex fs_lock
[NR_GLOBAL_LOCKS
]; /* blocking FS operations */
376 struct mutex node_write
; /* locking node writes */
377 struct mutex writepages
; /* mutex for writepages() */
378 unsigned char next_lock_num
; /* round-robin global locks */
379 int por_doing
; /* recovery is doing or not */
380 int on_build_free_nids
; /* build_free_nids is doing */
382 /* for orphan inode management */
383 struct list_head orphan_inode_list
; /* orphan inode list */
384 struct mutex orphan_inode_mutex
; /* for orphan inode list */
385 unsigned int n_orphans
; /* # of orphan inodes */
387 /* for directory inode management */
388 struct list_head dir_inode_list
; /* dir inode list */
389 spinlock_t dir_inode_lock
; /* for dir inode list lock */
391 /* basic file system units */
392 unsigned int log_sectors_per_block
; /* log2 sectors per block */
393 unsigned int log_blocksize
; /* log2 block size */
394 unsigned int blocksize
; /* block size */
395 unsigned int root_ino_num
; /* root inode number*/
396 unsigned int node_ino_num
; /* node inode number*/
397 unsigned int meta_ino_num
; /* meta inode number*/
398 unsigned int log_blocks_per_seg
; /* log2 blocks per segment */
399 unsigned int blocks_per_seg
; /* blocks per segment */
400 unsigned int segs_per_sec
; /* segments per section */
401 unsigned int secs_per_zone
; /* sections per zone */
402 unsigned int total_sections
; /* total section count */
403 unsigned int total_node_count
; /* total node block count */
404 unsigned int total_valid_node_count
; /* valid node block count */
405 unsigned int total_valid_inode_count
; /* valid inode count */
406 int active_logs
; /* # of active logs */
408 block_t user_block_count
; /* # of user blocks */
409 block_t total_valid_block_count
; /* # of valid blocks */
410 block_t alloc_valid_block_count
; /* # of allocated blocks */
411 block_t last_valid_block_count
; /* for recovery */
412 u32 s_next_generation
; /* for NFS support */
413 atomic_t nr_pages
[NR_COUNT_TYPE
]; /* # of pages, see count_type */
415 struct f2fs_mount_info mount_opt
; /* mount options */
417 /* for cleaning operations */
418 struct mutex gc_mutex
; /* mutex for GC */
419 struct f2fs_gc_kthread
*gc_thread
; /* GC thread */
420 unsigned int cur_victim_sec
; /* current victim section num */
423 * for stat information.
424 * one is for the LFS mode, and the other is for the SSR mode.
426 #ifdef CONFIG_F2FS_STAT_FS
427 struct f2fs_stat_info
*stat_info
; /* FS status information */
428 unsigned int segment_count
[2]; /* # of allocated segments */
429 unsigned int block_count
[2]; /* # of allocated blocks */
430 int total_hit_ext
, read_hit_ext
; /* extent cache hit ratio */
431 int bg_gc
; /* background gc calls */
432 unsigned int n_dirty_dirs
; /* # of dir inodes */
434 unsigned int last_victim
[2]; /* last victim segment # */
435 spinlock_t stat_lock
; /* lock for stat operations */
437 /* For sysfs suppport */
438 struct kobject s_kobj
;
439 struct completion s_kobj_unregister
;
445 static inline struct f2fs_inode_info
*F2FS_I(struct inode
*inode
)
447 return container_of(inode
, struct f2fs_inode_info
, vfs_inode
);
450 static inline struct f2fs_sb_info
*F2FS_SB(struct super_block
*sb
)
452 return sb
->s_fs_info
;
455 static inline struct f2fs_super_block
*F2FS_RAW_SUPER(struct f2fs_sb_info
*sbi
)
457 return (struct f2fs_super_block
*)(sbi
->raw_super
);
460 static inline struct f2fs_checkpoint
*F2FS_CKPT(struct f2fs_sb_info
*sbi
)
462 return (struct f2fs_checkpoint
*)(sbi
->ckpt
);
static inline struct f2fs_node *F2FS_NODE(struct page *page)
{
	return (struct f2fs_node *)page_address(page);
}
470 static inline struct f2fs_nm_info
*NM_I(struct f2fs_sb_info
*sbi
)
472 return (struct f2fs_nm_info
*)(sbi
->nm_info
);
475 static inline struct f2fs_sm_info
*SM_I(struct f2fs_sb_info
*sbi
)
477 return (struct f2fs_sm_info
*)(sbi
->sm_info
);
480 static inline struct sit_info
*SIT_I(struct f2fs_sb_info
*sbi
)
482 return (struct sit_info
*)(SM_I(sbi
)->sit_info
);
485 static inline struct free_segmap_info
*FREE_I(struct f2fs_sb_info
*sbi
)
487 return (struct free_segmap_info
*)(SM_I(sbi
)->free_info
);
490 static inline struct dirty_seglist_info
*DIRTY_I(struct f2fs_sb_info
*sbi
)
492 return (struct dirty_seglist_info
*)(SM_I(sbi
)->dirty_info
);
495 static inline void F2FS_SET_SB_DIRT(struct f2fs_sb_info
*sbi
)
500 static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info
*sbi
)
505 static inline bool is_set_ckpt_flags(struct f2fs_checkpoint
*cp
, unsigned int f
)
507 unsigned int ckpt_flags
= le32_to_cpu(cp
->ckpt_flags
);
508 return ckpt_flags
& f
;
511 static inline void set_ckpt_flags(struct f2fs_checkpoint
*cp
, unsigned int f
)
513 unsigned int ckpt_flags
= le32_to_cpu(cp
->ckpt_flags
);
515 cp
->ckpt_flags
= cpu_to_le32(ckpt_flags
);
518 static inline void clear_ckpt_flags(struct f2fs_checkpoint
*cp
, unsigned int f
)
520 unsigned int ckpt_flags
= le32_to_cpu(cp
->ckpt_flags
);
522 cp
->ckpt_flags
= cpu_to_le32(ckpt_flags
);
525 static inline void mutex_lock_all(struct f2fs_sb_info
*sbi
)
529 for (i
= 0; i
< NR_GLOBAL_LOCKS
; i
++) {
531 * This is the only time we take multiple fs_lock[]
532 * instances; the order is immaterial since we
533 * always hold cp_mutex, which serializes multiple
536 mutex_lock_nest_lock(&sbi
->fs_lock
[i
], &sbi
->cp_mutex
);
540 static inline void mutex_unlock_all(struct f2fs_sb_info
*sbi
)
543 for (; i
< NR_GLOBAL_LOCKS
; i
++)
544 mutex_unlock(&sbi
->fs_lock
[i
]);
547 static inline int mutex_lock_op(struct f2fs_sb_info
*sbi
)
549 unsigned char next_lock
= sbi
->next_lock_num
% NR_GLOBAL_LOCKS
;
552 for (; i
< NR_GLOBAL_LOCKS
; i
++)
553 if (mutex_trylock(&sbi
->fs_lock
[i
]))
556 mutex_lock(&sbi
->fs_lock
[next_lock
]);
557 sbi
->next_lock_num
++;
561 static inline void mutex_unlock_op(struct f2fs_sb_info
*sbi
, int ilock
)
565 BUG_ON(ilock
>= NR_GLOBAL_LOCKS
);
566 mutex_unlock(&sbi
->fs_lock
[ilock
]);
570 * Check whether the given nid is within node id range.
572 static inline int check_nid_range(struct f2fs_sb_info
*sbi
, nid_t nid
)
574 WARN_ON((nid
>= NM_I(sbi
)->max_nid
));
575 if (nid
>= NM_I(sbi
)->max_nid
)
580 #define F2FS_DEFAULT_ALLOCATED_BLOCKS 1
583 * Check whether the inode has blocks or not
585 static inline int F2FS_HAS_BLOCKS(struct inode
*inode
)
587 if (F2FS_I(inode
)->i_xattr_nid
)
588 return (inode
->i_blocks
> F2FS_DEFAULT_ALLOCATED_BLOCKS
+ 1);
590 return (inode
->i_blocks
> F2FS_DEFAULT_ALLOCATED_BLOCKS
);
593 static inline bool inc_valid_block_count(struct f2fs_sb_info
*sbi
,
594 struct inode
*inode
, blkcnt_t count
)
596 block_t valid_block_count
;
598 spin_lock(&sbi
->stat_lock
);
600 sbi
->total_valid_block_count
+ (block_t
)count
;
601 if (valid_block_count
> sbi
->user_block_count
) {
602 spin_unlock(&sbi
->stat_lock
);
605 inode
->i_blocks
+= count
;
606 sbi
->total_valid_block_count
= valid_block_count
;
607 sbi
->alloc_valid_block_count
+= (block_t
)count
;
608 spin_unlock(&sbi
->stat_lock
);
612 static inline int dec_valid_block_count(struct f2fs_sb_info
*sbi
,
616 spin_lock(&sbi
->stat_lock
);
617 BUG_ON(sbi
->total_valid_block_count
< (block_t
) count
);
618 BUG_ON(inode
->i_blocks
< count
);
619 inode
->i_blocks
-= count
;
620 sbi
->total_valid_block_count
-= (block_t
)count
;
621 spin_unlock(&sbi
->stat_lock
);
625 static inline void inc_page_count(struct f2fs_sb_info
*sbi
, int count_type
)
627 atomic_inc(&sbi
->nr_pages
[count_type
]);
628 F2FS_SET_SB_DIRT(sbi
);
631 static inline void inode_inc_dirty_dents(struct inode
*inode
)
633 atomic_inc(&F2FS_I(inode
)->dirty_dents
);
636 static inline void dec_page_count(struct f2fs_sb_info
*sbi
, int count_type
)
638 atomic_dec(&sbi
->nr_pages
[count_type
]);
641 static inline void inode_dec_dirty_dents(struct inode
*inode
)
643 atomic_dec(&F2FS_I(inode
)->dirty_dents
);
646 static inline int get_pages(struct f2fs_sb_info
*sbi
, int count_type
)
648 return atomic_read(&sbi
->nr_pages
[count_type
]);
651 static inline int get_blocktype_secs(struct f2fs_sb_info
*sbi
, int block_type
)
653 unsigned int pages_per_sec
= sbi
->segs_per_sec
*
654 (1 << sbi
->log_blocks_per_seg
);
655 return ((get_pages(sbi
, block_type
) + pages_per_sec
- 1)
656 >> sbi
->log_blocks_per_seg
) / sbi
->segs_per_sec
;
659 static inline block_t
valid_user_blocks(struct f2fs_sb_info
*sbi
)
662 spin_lock(&sbi
->stat_lock
);
663 ret
= sbi
->total_valid_block_count
;
664 spin_unlock(&sbi
->stat_lock
);
668 static inline unsigned long __bitmap_size(struct f2fs_sb_info
*sbi
, int flag
)
670 struct f2fs_checkpoint
*ckpt
= F2FS_CKPT(sbi
);
672 /* return NAT or SIT bitmap */
673 if (flag
== NAT_BITMAP
)
674 return le32_to_cpu(ckpt
->nat_ver_bitmap_bytesize
);
675 else if (flag
== SIT_BITMAP
)
676 return le32_to_cpu(ckpt
->sit_ver_bitmap_bytesize
);
681 static inline void *__bitmap_ptr(struct f2fs_sb_info
*sbi
, int flag
)
683 struct f2fs_checkpoint
*ckpt
= F2FS_CKPT(sbi
);
684 int offset
= (flag
== NAT_BITMAP
) ?
685 le32_to_cpu(ckpt
->sit_ver_bitmap_bytesize
) : 0;
686 return &ckpt
->sit_nat_version_bitmap
+ offset
;
689 static inline block_t
__start_cp_addr(struct f2fs_sb_info
*sbi
)
692 struct f2fs_checkpoint
*ckpt
= F2FS_CKPT(sbi
);
693 unsigned long long ckpt_version
= le64_to_cpu(ckpt
->checkpoint_ver
);
695 start_addr
= le32_to_cpu(F2FS_RAW_SUPER(sbi
)->cp_blkaddr
);
698 * odd numbered checkpoint should at cp segment 0
699 * and even segent must be at cp segment 1
701 if (!(ckpt_version
& 1))
702 start_addr
+= sbi
->blocks_per_seg
;
707 static inline block_t
__start_sum_addr(struct f2fs_sb_info
*sbi
)
709 return le32_to_cpu(F2FS_CKPT(sbi
)->cp_pack_start_sum
);
712 static inline bool inc_valid_node_count(struct f2fs_sb_info
*sbi
,
716 block_t valid_block_count
;
717 unsigned int valid_node_count
;
719 spin_lock(&sbi
->stat_lock
);
721 valid_block_count
= sbi
->total_valid_block_count
+ (block_t
)count
;
722 sbi
->alloc_valid_block_count
+= (block_t
)count
;
723 valid_node_count
= sbi
->total_valid_node_count
+ count
;
725 if (valid_block_count
> sbi
->user_block_count
) {
726 spin_unlock(&sbi
->stat_lock
);
730 if (valid_node_count
> sbi
->total_node_count
) {
731 spin_unlock(&sbi
->stat_lock
);
736 inode
->i_blocks
+= count
;
737 sbi
->total_valid_node_count
= valid_node_count
;
738 sbi
->total_valid_block_count
= valid_block_count
;
739 spin_unlock(&sbi
->stat_lock
);
744 static inline void dec_valid_node_count(struct f2fs_sb_info
*sbi
,
748 spin_lock(&sbi
->stat_lock
);
750 BUG_ON(sbi
->total_valid_block_count
< count
);
751 BUG_ON(sbi
->total_valid_node_count
< count
);
752 BUG_ON(inode
->i_blocks
< count
);
754 inode
->i_blocks
-= count
;
755 sbi
->total_valid_node_count
-= count
;
756 sbi
->total_valid_block_count
-= (block_t
)count
;
758 spin_unlock(&sbi
->stat_lock
);
761 static inline unsigned int valid_node_count(struct f2fs_sb_info
*sbi
)
764 spin_lock(&sbi
->stat_lock
);
765 ret
= sbi
->total_valid_node_count
;
766 spin_unlock(&sbi
->stat_lock
);
770 static inline void inc_valid_inode_count(struct f2fs_sb_info
*sbi
)
772 spin_lock(&sbi
->stat_lock
);
773 BUG_ON(sbi
->total_valid_inode_count
== sbi
->total_node_count
);
774 sbi
->total_valid_inode_count
++;
775 spin_unlock(&sbi
->stat_lock
);
778 static inline int dec_valid_inode_count(struct f2fs_sb_info
*sbi
)
780 spin_lock(&sbi
->stat_lock
);
781 BUG_ON(!sbi
->total_valid_inode_count
);
782 sbi
->total_valid_inode_count
--;
783 spin_unlock(&sbi
->stat_lock
);
787 static inline unsigned int valid_inode_count(struct f2fs_sb_info
*sbi
)
790 spin_lock(&sbi
->stat_lock
);
791 ret
= sbi
->total_valid_inode_count
;
792 spin_unlock(&sbi
->stat_lock
);
/* drop a page reference, optionally unlocking it first; NULL/ERR ok */
static inline void f2fs_put_page(struct page *page, int unlock)
{
	if (!page || IS_ERR(page))
		return;

	if (unlock) {
		BUG_ON(!PageLocked(page));
		unlock_page(page);
	}
	page_cache_release(page);
}
808 static inline void f2fs_put_dnode(struct dnode_of_data
*dn
)
811 f2fs_put_page(dn
->node_page
, 1);
812 if (dn
->inode_page
&& dn
->node_page
!= dn
->inode_page
)
813 f2fs_put_page(dn
->inode_page
, 0);
814 dn
->node_page
= NULL
;
815 dn
->inode_page
= NULL
;
818 static inline struct kmem_cache
*f2fs_kmem_cache_create(const char *name
,
819 size_t size
, void (*ctor
)(void *))
821 return kmem_cache_create(name
, size
, 0, SLAB_RECLAIM_ACCOUNT
, ctor
);
824 #define RAW_IS_INODE(p) ((p)->footer.nid == (p)->footer.ino)
826 static inline bool IS_INODE(struct page
*page
)
828 struct f2fs_node
*p
= F2FS_NODE(page
);
829 return RAW_IS_INODE(p
);
832 static inline __le32
*blkaddr_in_node(struct f2fs_node
*node
)
834 return RAW_IS_INODE(node
) ? node
->i
.i_addr
: node
->dn
.addr
;
837 static inline block_t
datablock_addr(struct page
*node_page
,
840 struct f2fs_node
*raw_node
;
842 raw_node
= F2FS_NODE(node_page
);
843 addr_array
= blkaddr_in_node(raw_node
);
844 return le32_to_cpu(addr_array
[offset
]);
/* test bit nr in a big-endian-within-byte bitmap (MSB is bit 0) */
static inline int f2fs_test_bit(unsigned int nr, char *addr)
{
	int mask;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	return mask & *addr;
}
/* set bit nr (MSB-first ordering); returns its previous value */
static inline int f2fs_set_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr |= mask;
	return ret;
}
/* clear bit nr (MSB-first ordering); returns its previous value */
static inline int f2fs_clear_bit(unsigned int nr, char *addr)
{
	int mask;
	int ret;

	addr += (nr >> 3);
	mask = 1 << (7 - (nr & 0x07));
	ret = mask & *addr;
	*addr &= ~mask;
	return ret;
}
880 /* used for f2fs_inode_info->flags */
enum {
	FI_NEW_INODE,		/* indicate newly allocated inode */
	FI_DIRTY_INODE,		/* indicate inode is dirty or not */
	FI_INC_LINK,		/* need to increment i_nlink */
	FI_ACL_MODE,		/* indicate acl mode */
	FI_NO_ALLOC,		/* should not allocate any blocks */
	FI_UPDATE_DIR,		/* should update inode block for consistency */
	FI_DELAY_IPUT,		/* used for the recovery */
};
891 static inline void set_inode_flag(struct f2fs_inode_info
*fi
, int flag
)
893 set_bit(flag
, &fi
->flags
);
896 static inline int is_inode_flag_set(struct f2fs_inode_info
*fi
, int flag
)
898 return test_bit(flag
, &fi
->flags
);
901 static inline void clear_inode_flag(struct f2fs_inode_info
*fi
, int flag
)
903 clear_bit(flag
, &fi
->flags
);
906 static inline void set_acl_inode(struct f2fs_inode_info
*fi
, umode_t mode
)
908 fi
->i_acl_mode
= mode
;
909 set_inode_flag(fi
, FI_ACL_MODE
);
912 static inline int cond_clear_inode_flag(struct f2fs_inode_info
*fi
, int flag
)
914 if (is_inode_flag_set(fi
, FI_ACL_MODE
)) {
915 clear_inode_flag(fi
, FI_ACL_MODE
);
921 static inline int f2fs_readonly(struct super_block
*sb
)
923 return sb
->s_flags
& MS_RDONLY
;
929 int f2fs_sync_file(struct file
*, loff_t
, loff_t
, int);
930 void truncate_data_blocks(struct dnode_of_data
*);
931 void f2fs_truncate(struct inode
*);
932 int f2fs_getattr(struct vfsmount
*, struct dentry
*, struct kstat
*);
933 int f2fs_setattr(struct dentry
*, struct iattr
*);
934 int truncate_hole(struct inode
*, pgoff_t
, pgoff_t
);
935 int truncate_data_blocks_range(struct dnode_of_data
*, int);
936 long f2fs_ioctl(struct file
*, unsigned int, unsigned long);
937 long f2fs_compat_ioctl(struct file
*, unsigned int, unsigned long);
942 void f2fs_set_inode_flags(struct inode
*);
943 struct inode
*f2fs_iget(struct super_block
*, unsigned long);
944 void update_inode(struct inode
*, struct page
*);
945 int update_inode_page(struct inode
*);
946 int f2fs_write_inode(struct inode
*, struct writeback_control
*);
947 void f2fs_evict_inode(struct inode
*);
952 struct dentry
*f2fs_get_parent(struct dentry
*child
);
957 struct f2fs_dir_entry
*f2fs_find_entry(struct inode
*, struct qstr
*,
959 struct f2fs_dir_entry
*f2fs_parent_dir(struct inode
*, struct page
**);
960 ino_t
f2fs_inode_by_name(struct inode
*, struct qstr
*);
961 void f2fs_set_link(struct inode
*, struct f2fs_dir_entry
*,
962 struct page
*, struct inode
*);
963 int update_dent_inode(struct inode
*, const struct qstr
*);
964 int __f2fs_add_link(struct inode
*, const struct qstr
*, struct inode
*);
965 void f2fs_delete_entry(struct f2fs_dir_entry
*, struct page
*, struct inode
*);
966 int f2fs_make_empty(struct inode
*, struct inode
*);
967 bool f2fs_empty_dir(struct inode
*);
969 static inline int f2fs_add_link(struct dentry
*dentry
, struct inode
*inode
)
971 return __f2fs_add_link(dentry
->d_parent
->d_inode
, &dentry
->d_name
,
978 int f2fs_sync_fs(struct super_block
*, int);
979 extern __printf(3, 4)
980 void f2fs_msg(struct super_block
*, const char *, const char *, ...);
985 f2fs_hash_t
f2fs_dentry_hash(const char *, size_t);
990 struct dnode_of_data
;
993 int is_checkpointed_node(struct f2fs_sb_info
*, nid_t
);
994 void get_node_info(struct f2fs_sb_info
*, nid_t
, struct node_info
*);
995 int get_dnode_of_data(struct dnode_of_data
*, pgoff_t
, int);
996 int truncate_inode_blocks(struct inode
*, pgoff_t
);
997 int remove_inode_page(struct inode
*);
998 struct page
*new_inode_page(struct inode
*, const struct qstr
*);
999 struct page
*new_node_page(struct dnode_of_data
*, unsigned int, struct page
*);
1000 void ra_node_page(struct f2fs_sb_info
*, nid_t
);
1001 struct page
*get_node_page(struct f2fs_sb_info
*, pgoff_t
);
1002 struct page
*get_node_page_ra(struct page
*, int);
1003 void sync_inode_page(struct dnode_of_data
*);
1004 int sync_node_pages(struct f2fs_sb_info
*, nid_t
, struct writeback_control
*);
1005 bool alloc_nid(struct f2fs_sb_info
*, nid_t
*);
1006 void alloc_nid_done(struct f2fs_sb_info
*, nid_t
);
1007 void alloc_nid_failed(struct f2fs_sb_info
*, nid_t
);
1008 void recover_node_page(struct f2fs_sb_info
*, struct page
*,
1009 struct f2fs_summary
*, struct node_info
*, block_t
);
1010 int recover_inode_page(struct f2fs_sb_info
*, struct page
*);
1011 int restore_node_summary(struct f2fs_sb_info
*, unsigned int,
1012 struct f2fs_summary_block
*);
1013 void flush_nat_entries(struct f2fs_sb_info
*);
1014 int build_node_manager(struct f2fs_sb_info
*);
1015 void destroy_node_manager(struct f2fs_sb_info
*);
1016 int __init
create_node_manager_caches(void);
1017 void destroy_node_manager_caches(void);
1022 void f2fs_balance_fs(struct f2fs_sb_info
*);
1023 void invalidate_blocks(struct f2fs_sb_info
*, block_t
);
1024 void clear_prefree_segments(struct f2fs_sb_info
*);
1025 int npages_for_summary_flush(struct f2fs_sb_info
*);
1026 void allocate_new_segments(struct f2fs_sb_info
*);
1027 struct page
*get_sum_page(struct f2fs_sb_info
*, unsigned int);
1028 struct bio
*f2fs_bio_alloc(struct block_device
*, int);
1029 void f2fs_submit_bio(struct f2fs_sb_info
*, enum page_type
, bool);
1030 void f2fs_wait_on_page_writeback(struct page
*, enum page_type
, bool);
1031 void write_meta_page(struct f2fs_sb_info
*, struct page
*);
1032 void write_node_page(struct f2fs_sb_info
*, struct page
*, unsigned int,
1033 block_t
, block_t
*);
1034 void write_data_page(struct inode
*, struct page
*, struct dnode_of_data
*,
1035 block_t
, block_t
*);
1036 void rewrite_data_page(struct f2fs_sb_info
*, struct page
*, block_t
);
1037 void recover_data_page(struct f2fs_sb_info
*, struct page
*,
1038 struct f2fs_summary
*, block_t
, block_t
);
1039 void rewrite_node_page(struct f2fs_sb_info
*, struct page
*,
1040 struct f2fs_summary
*, block_t
, block_t
);
1041 void write_data_summaries(struct f2fs_sb_info
*, block_t
);
1042 void write_node_summaries(struct f2fs_sb_info
*, block_t
);
1043 int lookup_journal_in_cursum(struct f2fs_summary_block
*,
1044 int, unsigned int, int);
1045 void flush_sit_entries(struct f2fs_sb_info
*);
1046 int build_segment_manager(struct f2fs_sb_info
*);
1047 void destroy_segment_manager(struct f2fs_sb_info
*);
1052 struct page
*grab_meta_page(struct f2fs_sb_info
*, pgoff_t
);
1053 struct page
*get_meta_page(struct f2fs_sb_info
*, pgoff_t
);
1054 long sync_meta_pages(struct f2fs_sb_info
*, enum page_type
, long);
1055 int acquire_orphan_inode(struct f2fs_sb_info
*);
1056 void release_orphan_inode(struct f2fs_sb_info
*);
1057 void add_orphan_inode(struct f2fs_sb_info
*, nid_t
);
1058 void remove_orphan_inode(struct f2fs_sb_info
*, nid_t
);
1059 int recover_orphan_inodes(struct f2fs_sb_info
*);
1060 int get_valid_checkpoint(struct f2fs_sb_info
*);
1061 void set_dirty_dir_page(struct inode
*, struct page
*);
1062 void add_dirty_dir_inode(struct inode
*);
1063 void remove_dirty_dir_inode(struct inode
*);
1064 struct inode
*check_dirty_dir_inode(struct f2fs_sb_info
*, nid_t
);
1065 void sync_dirty_dir_inodes(struct f2fs_sb_info
*);
1066 void write_checkpoint(struct f2fs_sb_info
*, bool);
1067 void init_orphan_info(struct f2fs_sb_info
*);
1068 int __init
create_checkpoint_caches(void);
1069 void destroy_checkpoint_caches(void);
1074 int reserve_new_block(struct dnode_of_data
*);
1075 void update_extent_cache(block_t
, struct dnode_of_data
*);
1076 struct page
*find_data_page(struct inode
*, pgoff_t
, bool);
1077 struct page
*get_lock_data_page(struct inode
*, pgoff_t
);
1078 struct page
*get_new_data_page(struct inode
*, struct page
*, pgoff_t
, bool);
1079 int f2fs_readpage(struct f2fs_sb_info
*, struct page
*, block_t
, int);
1080 int do_write_data_page(struct page
*);
1085 int start_gc_thread(struct f2fs_sb_info
*);
1086 void stop_gc_thread(struct f2fs_sb_info
*);
1087 block_t
start_bidx_of_node(unsigned int);
1088 int f2fs_gc(struct f2fs_sb_info
*);
1089 void build_gc_manager(struct f2fs_sb_info
*);
1090 int __init
create_gc_caches(void);
1091 void destroy_gc_caches(void);
1096 int recover_fsync_data(struct f2fs_sb_info
*);
1097 bool space_for_roll_forward(struct f2fs_sb_info
*);
1102 #ifdef CONFIG_F2FS_STAT_FS
1103 struct f2fs_stat_info
{
1104 struct list_head stat_list
;
1105 struct f2fs_sb_info
*sbi
;
1106 struct mutex stat_lock
;
1107 int all_area_segs
, sit_area_segs
, nat_area_segs
, ssa_area_segs
;
1108 int main_area_segs
, main_area_sections
, main_area_zones
;
1109 int hit_ext
, total_ext
;
1110 int ndirty_node
, ndirty_dent
, ndirty_dirs
, ndirty_meta
;
1111 int nats
, sits
, fnids
;
1112 int total_count
, utilization
;
1114 unsigned int valid_count
, valid_node_count
, valid_inode_count
;
1115 unsigned int bimodal
, avg_vblocks
;
1116 int util_free
, util_valid
, util_invalid
;
1117 int rsvd_segs
, overp_segs
;
1118 int dirty_count
, node_pages
, meta_pages
;
1119 int prefree_count
, call_count
;
1120 int tot_segs
, node_segs
, data_segs
, free_segs
, free_secs
;
1121 int tot_blks
, data_blks
, node_blks
;
1122 int curseg
[NR_CURSEG_TYPE
];
1123 int cursec
[NR_CURSEG_TYPE
];
1124 int curzone
[NR_CURSEG_TYPE
];
1126 unsigned int segment_count
[2];
1127 unsigned int block_count
[2];
1128 unsigned base_mem
, cache_mem
;
1131 static inline struct f2fs_stat_info
*F2FS_STAT(struct f2fs_sb_info
*sbi
)
1133 return (struct f2fs_stat_info
*)sbi
->stat_info
;
1136 #define stat_inc_call_count(si) ((si)->call_count++)
1138 #define stat_inc_seg_count(sbi, type) \
1140 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
1142 if (type == SUM_TYPE_DATA) \
1148 #define stat_inc_tot_blk_count(si, blks) \
1149 (si->tot_blks += (blks))
1151 #define stat_inc_data_blk_count(sbi, blks) \
1153 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
1154 stat_inc_tot_blk_count(si, blks); \
1155 si->data_blks += (blks); \
1158 #define stat_inc_node_blk_count(sbi, blks) \
1160 struct f2fs_stat_info *si = F2FS_STAT(sbi); \
1161 stat_inc_tot_blk_count(si, blks); \
1162 si->node_blks += (blks); \
1165 int f2fs_build_stats(struct f2fs_sb_info
*);
1166 void f2fs_destroy_stats(struct f2fs_sb_info
*);
1167 void __init
f2fs_create_root_stats(void);
1168 void f2fs_destroy_root_stats(void);
1170 #define stat_inc_call_count(si)
1171 #define stat_inc_seg_count(si, type)
1172 #define stat_inc_tot_blk_count(si, blks)
1173 #define stat_inc_data_blk_count(si, blks)
1174 #define stat_inc_node_blk_count(sbi, blks)
1176 static inline int f2fs_build_stats(struct f2fs_sb_info
*sbi
) { return 0; }
1177 static inline void f2fs_destroy_stats(struct f2fs_sb_info
*sbi
) { }
1178 static inline void __init
f2fs_create_root_stats(void) { }
1179 static inline void f2fs_destroy_root_stats(void) { }
1182 extern const struct file_operations f2fs_dir_operations
;
1183 extern const struct file_operations f2fs_file_operations
;
1184 extern const struct inode_operations f2fs_file_inode_operations
;
1185 extern const struct address_space_operations f2fs_dblock_aops
;
1186 extern const struct address_space_operations f2fs_node_aops
;
1187 extern const struct address_space_operations f2fs_meta_aops
;
1188 extern const struct inode_operations f2fs_dir_inode_operations
;
1189 extern const struct inode_operations f2fs_symlink_inode_operations
;
1190 extern const struct inode_operations f2fs_special_inode_operations
;