/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
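/*
 * Note: f2fs_shrink_count()/f2fs_shrink_scan() are implemented outside this
 * file; they walk the sbi list maintained by f2fs_join_shrinker() /
 * f2fs_leave_shrinker() below and free cached in-memory metadata (such as
 * extent-cache entries). Only the shrinker registration lives here.
 */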
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_noheap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_flush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_err, NULL},
};
/* Sysfs support for f2fs */
enum {
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
};
struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	int offset;
};
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;
	return NULL;
}
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
{
	struct super_block *sb = sbi->sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)(sbi->kbytes_written +
			BD_PART_WRITTEN(sbi)));
}
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
{
	unsigned char *ptr = NULL;
	unsigned int *ui;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
{
	unsigned char *ptr;
	unsigned long t;
	unsigned int *ui;
	ssize_t ret;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
	if (ret < 0)
		return ret;
	*ui = t;
	return count;
}
static ssize_t f2fs_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;
}
static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}
static void f2fs_sb_release(struct kobject *kobj)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
}
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
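/*
 * Example: F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time,
 * min_sleep_time) below builds a "gc_min_sleep_time" attribute whose
 * show/store callbacks read and write f2fs_gc_kthread.min_sleep_time through
 * the offsetof() offset; with the kset registered in init_f2fs_fs() it
 * typically surfaces as /sys/fs/f2fs/<dev>/gc_min_sleep_time.
 */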
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(gc_idle),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(dirty_nats_ratio),
	ATTR_LIST(cp_interval),
	ATTR_LIST(idle_interval),
	ATTR_LIST(lifetime_write_kbytes),
	NULL,
};
static const struct sysfs_ops f2fs_attr_ops = {
	.show	= f2fs_attr_show,
	.store	= f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
	.default_attrs	= f2fs_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
	.release	= f2fs_sb_release,
};
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);

	set_inode_flag(fi, FI_NEW_INODE);

	if (test_opt(F2FS_SB(sb), INLINE_XATTR))
		set_inode_flag(fi, FI_INLINE_XATTR);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;
	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* should remain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode, true);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		return 0;
	}
	return generic_drop_inode(inode);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	stop_gc_thread(sbi);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi);
	release_discard_addrs(sbi);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in the cp_error case, we can wait for any writeback page */
	if (get_pages(sbi, F2FS_WRITEBACK))
		f2fs_flush_merged_bios(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	kfree(sbi->raw_super);
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	int err;

	if (f2fs_readonly(sb))
		return 0;

	err = f2fs_sync_fs(sb, 1);
	return err;
}
static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = buf->f_files - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}
static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i;

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, 1));
		if ((i % 10) == 9 || i == (total_segs - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}
static int segment_info_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, segment_info_seq_show, PDE_DATA(inode));
}
static const struct file_operations f2fs_seq_segment_info_fops = {
	.owner = THIS_MODULE,
	.open = segment_info_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, EXTENT_CACHE);

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	if (*flags & MS_RDONLY) {
		set_opt(sbi, FASTBOOT);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
	}

	sync_filesystem(sb);

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switching the extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			f2fs_sync_fs(sb, 1);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	/*
	 * We stop the issue-flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		destroy_flush_cmd_control(sbi);
	} else if (!SM_I(sbi)->cmd_control_info) {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	return err;
}
static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static struct fscrypt_operations f2fs_cryptops = {
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.is_encrypted	= f2fs_encrypted_inode,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#else
static struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted	= f2fs_encrypted_inode,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
static inline bool sanity_check_area_boundary(struct super_block *sb,
					struct f2fs_super_block *raw_super)
{
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
		segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
			main_blkaddr,
			segment0_blkaddr + (segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	}

	return false;
}
static int sanity_check_raw_super(struct super_block *sb,
			struct f2fs_super_block *raw_super)
{
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_CACHE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sb, raw_super))
		return 1;

	return 0;
}
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
}
/*
 * Read the f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either is broken, we pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct super_block *sb,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super, *buf;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		buf = (struct f2fs_super_block *)
				(bh->b_data + F2FS_SUPER_OFFSET);

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sb, buf)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, buf, sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Fail to read any one of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
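/*
 * Both superblock copies live at the front of the device: blocks 0 and 1 are
 * read with sb_bread() above, and the f2fs_super_block sits at
 * F2FS_SUPER_OFFSET within each block. This is why a damaged copy can later
 * be repaired from the surviving one by f2fs_commit_super().
 */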
static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block)
{
	struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi);
	struct buffer_head *bh;
	int err;

	bh = sb_getblk(sbi->sb, block);
	if (!bh)
		return -EIO;

	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);

	/* it's rare case, we can do fua all the time */
	err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
	brelse(bh);

	return err;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	int err;

	/* write back-up superblock first */
	err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 0 : 1);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	return __f2fs_commit_super(sbi, sbi->valid_super_block);
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	long err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sb, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->sb = sb;
	sbi->raw_super = raw_super;
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->writepages);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	sbi->total_valid_inode_count =
				le32_to_cpu(sbi->ckpt->valid_inode_count);
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->alloc_valid_block_count = 0;
	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if they exist */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc)
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi);
		if (err) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%ld", err);
			goto free_kobj;
		}
	}
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only,
	 * start the GC thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %ld",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root)
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}
static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
	if (!f2fs_kset) {
		err = -ENOMEM;
		goto free_extent_cache;
	}
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_kset;

	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_kset:
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_shrinker(&f2fs_shrinker_info);
	unregister_filesystem(&f2fs_fs_type);
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	kset_unregister(f2fs_kset);
	f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");