/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;
#ifdef CONFIG_F2FS_FAULT_INJECTION
struct f2fs_fault_info f2fs_fault;

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
};

static void f2fs_build_fault_attr(unsigned int rate)
{
	if (rate) {
		atomic_set(&f2fs_fault.inject_ops, 0);
		f2fs_fault.inject_rate = rate;
		f2fs_fault.inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(&f2fs_fault, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
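
/*
 * Roughly: each hooked allocation/operation bumps f2fs_fault.inject_ops;
 * once the counter reaches inject_rate it is reset and that call is forced
 * to fail, so a rate of N fails about one call in N for every fault type
 * enabled in the inject_type bitmap (all types, when set via this helper).
 */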
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	/* ... the remaining Opt_* enumerators, one per f2fs_tokens entry ... */
	Opt_disable_ext_identify,
	/* ... */
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_err, NULL},
};
/* Sysfs support for f2fs */
enum {
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	FAULT_INFO_RATE,	/* struct f2fs_fault_info */
	FAULT_INFO_TYPE,	/* struct f2fs_fault_info */
#endif
};
struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	unsigned int offset;
};
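
/*
 * Each f2fs_attr pairs a sysfs file with a (struct_type, byte offset)
 * location: the generic show/store handlers below use __struct_ptr() to
 * find the right in-memory structure and then treat offset as an
 * unsigned int field inside it, so no per-attribute handlers are needed.
 */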
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	else if (struct_type == FAULT_INFO_RATE ||
			struct_type == FAULT_INFO_TYPE)
		return (unsigned char *)&f2fs_fault;
#endif
	return NULL;
}
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
{
	struct super_block *sb = sbi->sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)(sbi->kbytes_written +
			BD_PART_WRITTEN(sbi)));
}
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
{
	unsigned char *ptr = NULL;
	unsigned int *ui;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
{
	unsigned char *ptr;
	unsigned long t;
	unsigned int *ui;
	ssize_t ret;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
	if (ret < 0)
		return ret;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
		return -EINVAL;
#endif
	*ui = t;
	return count;
}
static ssize_t f2fs_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;
}
static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}
static void f2fs_sb_release(struct kobject *kobj)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
}
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
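
/*
 * Example (hypothetical expansion): F2FS_RW_ATTR(SM_INFO, f2fs_sm_info,
 * ipu_policy, ipu_policy) defines a 0644 sysfs file named "ipu_policy"
 * whose reads and writes go through f2fs_sbi_show()/f2fs_sbi_store() at
 * offsetof(struct f2fs_sm_info, ipu_policy).
 */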
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
#ifdef CONFIG_F2FS_FAULT_INJECTION
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(gc_idle),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(dirty_nats_ratio),
	ATTR_LIST(cp_interval),
	ATTR_LIST(idle_interval),
	ATTR_LIST(lifetime_write_kbytes),
	NULL,
};
static const struct sysfs_ops f2fs_attr_ops = {
	.show	= f2fs_attr_show,
	.store	= f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
	.default_attrs	= f2fs_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
	.release	= f2fs_sb_release,
};
#ifdef CONFIG_F2FS_FAULT_INJECTION
/* sysfs for f2fs fault injection */
static struct kobject f2fs_fault_inject;

static struct attribute *f2fs_fault_attrs[] = {
	ATTR_LIST(inject_rate),
	ATTR_LIST(inject_type),
	NULL
};

static struct kobj_type f2fs_fault_ktype = {
	.default_attrs	= f2fs_fault_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
};
#endif
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
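
/*
 * Mount-option parsing: the option string is split on ',' and each token
 * is matched against f2fs_tokens. A line such as (hypothetical example)
 * "mount -t f2fs -o background_gc=sync,active_logs=6,noinline_data ..."
 * takes the Opt_gc_background, Opt_active_logs and Opt_noinline_data
 * arms of the switch below.
 */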
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(0);
#endif

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);
			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(arg);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= MS_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~MS_LAZYTIME;
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}
	return 0;
}
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	if (percpu_counter_init(&fi->dirty_pages, 0, GFP_NOFS)) {
		kmem_cache_free(f2fs_inode_cachep, fi);
		return NULL;
	}

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode being called simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* fi->extent_tree should remain for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		return 0;
	}
	return generic_drop_inode(inode);
}
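
/*
 * Dirty-inode bookkeeping: the two helpers below keep every inode with
 * unwritten inode metadata on sbi->inode_list[DIRTY_META], protected by
 * inode_lock[DIRTY_META], so checkpoint can walk and flush them all.
 */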
int f2fs_inode_dirtied(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return 1;
	}

	set_inode_flag(inode, FI_DIRTY_INODE);
	list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
	inc_page_count(sbi, F2FS_DIRTY_IMETA);
	stat_inc_dirty_inode(sbi, DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	return 0;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	list_del_init(&F2FS_I(inode)->gdirty_list);
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	dec_page_count(sbi, F2FS_DIRTY_IMETA);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}
static void f2fs_destroy_inode(struct inode *inode)
{
	percpu_counter_destroy(&F2FS_I(inode)->dirty_pages);
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		percpu_counter_destroy(&sbi->nr_pages[i]);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	stop_gc_thread(sbi);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi, true);
	release_discard_addrs(sbi);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_bios(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_percpu_info(sbi);
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
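
/*
 * Freeze only needs f2fs_sync_fs(sb, 1): a completed checkpoint leaves
 * the on-disk image self-consistent, so there is no extra journal state
 * to quiesce and unfreeze has nothing to undo.
 */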
static int f2fs_freeze(struct super_block *sb)
{
	int err;

	if (f2fs_readonly(sb))
		return 0;

	err = f2fs_sync_fs(sb, 1);
	return err;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = buf->f_files - valid_inode_count(sbi);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap_alloc");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);

	return 0;
}
static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i;

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, 1));
		if ((i % 10) == 9 || i == (total_segs - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}
static int segment_bits_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i, j;

	seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u|", se->type,
					get_valid_blocks(sbi, i, 1));
		for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
			seq_printf(seq, "%x ", se->cur_valid_map[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}
#define F2FS_PROC_FILE_DEF(_name)					\
static int _name##_open_fs(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, _name##_seq_show, PDE_DATA(inode));	\
}									\
									\
static const struct file_operations f2fs_seq_##_name##_fops = {	\
	.open = _name##_open_fs,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
	.release = single_release,					\
};

F2FS_PROC_FILE_DEF(segment_info);
F2FS_PROC_FILE_DEF(segment_bits);
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_hmsmr(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif
}
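
/*
 * Remount snapshots the live options (org_mount_opt/active_logs) first,
 * so that any failure while applying the new option string can roll the
 * mount back to exactly its previous state via the restore_* paths.
 */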
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * The previous and new state of the filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	/* disallow enabling/disabling extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switching the extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if the FS is mounted as RO
	 * or if background_gc=off is passed in the mount
	 * options. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue-flush thread if the FS is mounted as RO
	 * or if flush_merge is not passed in the mount options.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		destroy_flush_cmd_control(sbi);
	} else if (!SM_I(sbi)->cmd_control_info) {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	return err;
}
static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_key_prefix(struct inode *inode, u8 **key)
{
	*key = F2FS_I_SB(inode)->key_prefix;
	return F2FS_I_SB(inode)->key_prefix_size;
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static struct fscrypt_operations f2fs_cryptops = {
	.get_context	= f2fs_get_context,
	.key_prefix	= f2fs_key_prefix,
	.set_context	= f2fs_set_context,
	.is_encrypted	= f2fs_encrypted_inode,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#else
static struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted	= f2fs_encrypted_inode,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
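
/*
 * Worked example, assuming the usual 4KB-block constants (923 block
 * addresses in the inode minus 50 reserved for inline xattrs, and 1018
 * addresses or node IDs per block):
 *   873 + 2*1018 + 2*1018^2 + 1018^3 ~= 1.06e9 blocks,
 * i.e. a maximum file size of roughly 3.9TiB with 4KB blocks.
 */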
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare case, so we can do FUA all the time */
	return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA);
}
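
/*
 * On-disk layout validated below; each metadata area must end exactly
 * where the next one begins:
 *   superblocks | CP | SIT | NAT | SSA | MAIN
 */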
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
	return 0;
}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	mutex_init(&sbi->wio_mutex[NODE]);
	mutex_init(&sbi->wio_mutex[DATA]);

#ifdef CONFIG_F2FS_FS_ENCRYPTION
	memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
				F2FS_KEY_DESC_PREFIX_SIZE);
	sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE;
#endif
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int i, err;

	for (i = 0; i < NR_COUNT_TYPE; i++) {
		err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
		if (err)
			return err;
	}

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}
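
/*
 * f2fs keeps two superblock copies at the start of the device. Mount
 * takes the first copy that passes the sanity checks; if either copy is
 * unreadable or invalid, *recovery asks the caller to rewrite the broken
 * one once the device is writable.
 */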
/*
 * Read the raw f2fs superblock.
 * Because we have two copies of the superblock, read both of them
 * to get the first valid one. If either is broken, we pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* failed to read either of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* no valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
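
/*
 * Mount path overview: f2fs_fill_super() reads and sanity-checks the raw
 * superblock, parses options, loads the checkpoint, brings up the segment
 * and node managers, instantiates the meta/node/root inodes, then rolls
 * forward fsynced-but-not-checkpointed data before starting background GC.
 */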
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_options;

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_options;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_root_inode;

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc) {
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				&f2fs_seq_segment_info_fops, sb);
		proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
				&f2fs_seq_segment_bits_fops, sb);
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi->ckpt, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_kobj;
		}
	} else {
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_kobj;
		}
	}

	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only, then
	 * start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run the background GC thread. */
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	f2fs_sync_inode_meta(sbi);
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	f2fs_destroy_stats(sbi);
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	mutex_lock(&sbi->umount_mutex);
	f2fs_leave_shrinker(sbi);
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_options:
	destroy_percpu_info(sbi);
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one more chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root)
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
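
/*
 * Module init builds everything a mount needs before the filesystem type
 * becomes visible: slab caches first, then the sysfs kset, the shrinker,
 * and only then register_filesystem(); the labels unwind in reverse order
 * on failure.
 */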
static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
	if (!f2fs_kset) {
		err = -ENOMEM;
		goto free_extent_cache;
	}
#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_fault_inject.kset = f2fs_kset;
	f2fs_build_fault_attr(0);
	err = kobject_init_and_add(&f2fs_fault_inject, &f2fs_fault_ktype,
				NULL, "fault_injection");
	if (err) {
		f2fs_fault_inject.kset = NULL;
		goto free_kset;
	}
#endif
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_kset;

	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_kset:
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (f2fs_fault_inject.kset)
		kobject_put(&f2fs_fault_inject);
#endif
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	kobject_put(&f2fs_fault_inject);
#endif
	kset_unregister(f2fs_kset);
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");