/*
 * fs/f2fs/super.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "gc.h"
#include "trace.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
static struct kset *f2fs_kset;
#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
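/*
 * Illustrative note (the counting itself lives in a helper in f2fs.h, not
 * shown here): mounting with -o fault_injection=1000 ends up calling
 * f2fs_build_fault_attr(sbi, 1000), so roughly one out of every 1000
 * injectable operations is failed on purpose, and the inject_type bitmap
 * enables every FAULT_* class named in the table above.
 */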
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
enum {
	Opt_gc_background,
	Opt_disable_roll_forward,
	Opt_norecovery,
	Opt_discard,
	Opt_nodiscard,
	Opt_noheap,
	Opt_heap,
	Opt_user_xattr,
	Opt_nouser_xattr,
	Opt_acl,
	Opt_noacl,
	Opt_active_logs,
	Opt_disable_ext_identify,
	Opt_inline_xattr,
	Opt_noinline_xattr,
	Opt_inline_data,
	Opt_inline_dentry,
	Opt_noinline_dentry,
	Opt_flush_merge,
	Opt_noflush_merge,
	Opt_nobarrier,
	Opt_fastboot,
	Opt_extent_cache,
	Opt_noextent_cache,
	Opt_noinline_data,
	Opt_data_flush,
	Opt_mode,
	Opt_io_size_bits,
	Opt_fault_injection,
	Opt_lazytime,
	Opt_nolazytime,
	Opt_err,
};
static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_heap, "heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_err, NULL},
};
/* Sysfs support for f2fs */
enum {
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	DCC_INFO,	/* struct discard_cmd_control */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	FAULT_INFO_RATE,	/* struct f2fs_fault_info */
	FAULT_INFO_TYPE,	/* struct f2fs_fault_info */
#endif
};
struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	int offset;
};
static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type)
{
	if (struct_type == GC_THREAD)
		return (unsigned char *)sbi->gc_thread;
	else if (struct_type == SM_INFO)
		return (unsigned char *)SM_I(sbi);
	else if (struct_type == DCC_INFO)
		return (unsigned char *)SM_I(sbi)->dcc_info;
	else if (struct_type == NM_INFO)
		return (unsigned char *)NM_I(sbi);
	else if (struct_type == F2FS_SBI)
		return (unsigned char *)sbi;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	else if (struct_type == FAULT_INFO_RATE ||
					struct_type == FAULT_INFO_TYPE)
		return (unsigned char *)&sbi->fault_info;
#endif
	return NULL;
}
static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
		struct f2fs_sb_info *sbi, char *buf)
{
	struct super_block *sb = sbi->sb;

	if (!sb->s_bdev->bd_part)
		return snprintf(buf, PAGE_SIZE, "0\n");

	return snprintf(buf, PAGE_SIZE, "%llu\n",
		(unsigned long long)(sbi->kbytes_written +
			BD_PART_WRITTEN(sbi)));
}
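/*
 * What this reports, as a sketch: kbytes_written accumulated across
 * previous mounts (read back from the hot-node curseg journal in
 * f2fs_fill_super() below), plus BD_PART_WRITTEN(sbi) - a helper defined
 * in f2fs.h, not shown here - which is the partition's sector write count
 * since this mount, converted to kilobytes.
 */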
static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi, char *buf)
{
	unsigned char *ptr = NULL;
	unsigned int *ui;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
}
static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
			struct f2fs_sb_info *sbi,
			const char *buf, size_t count)
{
	unsigned char *ptr;
	unsigned long t;
	unsigned int *ui;
	ssize_t ret;

	ptr = __struct_ptr(sbi, a->struct_type);
	if (!ptr)
		return -EINVAL;

	ui = (unsigned int *)(ptr + a->offset);

	ret = kstrtoul(skip_spaces(buf), 0, &t);
	if (ret < 0)
		return ret;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX))
		return -EINVAL;
#endif
	*ui = t;
	return count;
}
static ssize_t f2fs_attr_show(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->show ? a->show(a, sbi, buf) : 0;
}
static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}
static void f2fs_sb_release(struct kobject *kobj)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
								s_kobj);
	complete(&sbi->s_kobj_unregister);
}
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)
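/*
 * For illustration: F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread,
 * gc_min_sleep_time, min_sleep_time) expands to a struct f2fs_attr whose
 * show/store callbacks read and write f2fs_gc_kthread.min_sleep_time
 * through the offsetof() value, surfaced as a mode-0644 sysfs file named
 * gc_min_sleep_time. __struct_ptr() above picks the right base structure
 * from struct_type at access time.
 */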
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_min_sleep_time, min_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_max_sleep_time, max_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time);
F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_idle, gc_idle);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments);
F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
#ifdef CONFIG_F2FS_FAULT_INJECTION
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
	ATTR_LIST(gc_min_sleep_time),
	ATTR_LIST(gc_max_sleep_time),
	ATTR_LIST(gc_no_gc_sleep_time),
	ATTR_LIST(gc_idle),
	ATTR_LIST(reclaim_segments),
	ATTR_LIST(max_small_discards),
	ATTR_LIST(batched_trim_sections),
	ATTR_LIST(ipu_policy),
	ATTR_LIST(min_ipu_util),
	ATTR_LIST(min_fsync_blocks),
	ATTR_LIST(min_hot_blocks),
	ATTR_LIST(max_victim_search),
	ATTR_LIST(dir_level),
	ATTR_LIST(ram_thresh),
	ATTR_LIST(ra_nid_pages),
	ATTR_LIST(dirty_nats_ratio),
	ATTR_LIST(cp_interval),
	ATTR_LIST(idle_interval),
#ifdef CONFIG_F2FS_FAULT_INJECTION
	ATTR_LIST(inject_rate),
	ATTR_LIST(inject_type),
#endif
	ATTR_LIST(lifetime_write_kbytes),
	NULL,
};
static const struct sysfs_ops f2fs_attr_ops = {
	.show	= f2fs_attr_show,
	.store	= f2fs_attr_store,
};

static struct kobj_type f2fs_ktype = {
	.default_attrs	= f2fs_attrs,
	.sysfs_ops	= &f2fs_attr_ops,
	.release	= f2fs_sb_release,
};
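/*
 * Usage sketch (the path follows from the "f2fs" kset created under
 * fs_kobj in init_f2fs_fs() and the per-superblock kobject named after
 * sb->s_id in f2fs_fill_super(); the device name is illustrative):
 *   cat /sys/fs/f2fs/sdb1/gc_min_sleep_time
 *   echo 1000 > /sys/fs/f2fs/sdb1/gc_min_sleep_time
 */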
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}
static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];
	char *p, *name;
	int arg = 0;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		switch (token) {
		case Opt_gc_background:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				set_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, BG_GC);
				set_opt(sbi, FORCE_FG_GC);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			break;
		case Opt_norecovery:
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
				return -EINVAL;
			break;
		case Opt_discard:
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			}
			break;
		case Opt_nodiscard:
			if (f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
				return -EINVAL;
			}
			clear_opt(sbi, DISCARD);
			break;
		case Opt_noheap:
			set_opt(sbi, NOHEAP);
			break;
		case Opt_heap:
			clear_opt(sbi, NOHEAP);
			break;
#ifdef CONFIG_F2FS_FS_XATTR
		case Opt_user_xattr:
			set_opt(sbi, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
			break;
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
			break;
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			break;
#else
		case Opt_user_xattr:
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
			break;
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
			break;
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
			break;
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
			break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
		case Opt_acl:
			set_opt(sbi, POSIX_ACL);
			break;
		case Opt_noacl:
			clear_opt(sbi, POSIX_ACL);
			break;
#else
		case Opt_acl:
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			break;
		case Opt_noacl:
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
			break;
#endif
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
				return -EINVAL;
			sbi->active_logs = arg;
			break;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
			break;
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
			break;
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
			break;
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			break;
		case Opt_nobarrier:
			set_opt(sbi, NOBARRIER);
			break;
		case Opt_fastboot:
			set_opt(sbi, FASTBOOT);
			break;
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
			break;
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			break;
		case Opt_data_flush:
			set_opt(sbi, DATA_FLUSH);
			break;
		case Opt_mode:
			name = match_strdup(&args[0]);

			if (!name)
				return -ENOMEM;
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_mounted_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						 "adaptive mode is not allowed with "
						 "zoned block device feature");
					kfree(name);
					return -EINVAL;
				}
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
			} else {
				kfree(name);
				return -EINVAL;
			}
			kfree(name);
			break;
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
				return -EINVAL;
			}
			sbi->write_io_size_bits = arg;
			break;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
				return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
#else
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
#endif
			break;
		case Opt_lazytime:
			sb->s_flags |= MS_LAZYTIME;
			break;
		case Opt_nolazytime:
			sb->s_flags &= ~MS_LAZYTIME;
			break;
		default:
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",
				p);
			return -EINVAL;
		}
	}

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
				"Should set mode=lfs with %uKB-sized IO",
				F2FS_IO_SIZE_KB(sbi));
		return -EINVAL;
	}
	return 0;
}
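/*
 * Example invocation (device and mount point are illustrative only):
 *   mount -t f2fs -o background_gc=sync,discard,mode=lfs /dev/sdb1 /mnt
 * parse_options() receives "background_gc=sync,discard,mode=lfs" and
 * applies the three comma-separated tokens to sbi->mount_opt in turn.
 */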
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	fi->i_advise = 0;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;
	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	int ret;
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode call simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* any remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* fi->extent_tree should remain for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret = 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		ret = 1;
	} else {
		set_inode_flag(inode, FI_DIRTY_INODE);
		stat_inc_dirty_inode(sbi, DIRTY_META);
	}
	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
	return ret;
}
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty()
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
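/*
 * Note on the call_rcu() above: freeing the f2fs_inode_info is deferred
 * until an RCU grace period has elapsed, so lock-free readers (e.g.
 * RCU-walk path lookup) can never dereference an already-freed inode.
 */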
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}
static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
#endif
	}
	kfree(sbi->devs);
}
static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	stop_gc_thread(sbi);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do checkpoint when superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need to
	 * do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* be sure to wait for any on-going discard commands */
	f2fs_wait_discard_bios(sbi);

	if (!sbi->discard_blks) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		};
		write_checkpoint(sbi, &cpc);
	}

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally, the superblock is clean, so we need to release the ino
	 * entries here. In addition, an EIO case skips the checkpoint, so we
	 * need this as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_bios(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	kfree(sbi->ckpt);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
	destroy_percpu_info(sbi);
	kfree(sbi);
}
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	int err = 0;

	trace_f2fs_sync_fs(sb, sync);

	if (sync) {
		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
	f2fs_trace_ios(NULL, 1);

	return err;
}
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi);

	buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
	buf->f_ffree = min(buf->f_files - valid_node_count(sbi),
							buf->f_bavail);

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);

	return 0;
}
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
#endif
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_puts(seq, ",fault_injection");
#endif

	return 0;
}
static int segment_info_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i;

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, false));
		if ((i % 10) == 9 || i == (total_segs - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}
static int segment_bits_seq_show(struct seq_file *seq, void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i, j;

	seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u|", se->type,
					get_valid_blocks(sbi, i, false));
		for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
			seq_printf(seq, " %.2x", se->cur_valid_map[j]);
		seq_putc(seq, '\n');
	}
	return 0;
}
#define F2FS_PROC_FILE_DEF(_name)					\
static int _name##_open_fs(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, _name##_seq_show, PDE_DATA(inode));	\
}									\
									\
static const struct file_operations f2fs_seq_##_name##_fops = {		\
	.open = _name##_open_fs,					\
	.read = seq_read,						\
	.llseek = seq_lseek,						\
	.release = single_release,					\
};

F2FS_PROC_FILE_DEF(segment_info);
F2FS_PROC_FILE_DEF(segment_bits);
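/*
 * Usage sketch (the path follows from the proc entries registered in
 * f2fs_fill_super() below; the device name is illustrative):
 *   cat /proc/fs/f2fs/sdb1/segment_info
 * prints "type|valid_blocks" pairs, ten per line, one pair per main-area
 * segment.
 */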
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, BG_GC);
	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info ffi = sbi->fault_info;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	active_logs = sbi->active_logs;

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		if (!err)
			clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	sbi->mount_opt.opt = 0;
	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);
	if (err)
		goto restore_opts;

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	/* disallow enable/disable extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		err = -EINVAL;
		f2fs_msg(sbi->sb, KERN_WARNING,
				"switch extent_cache option is not allowed");
		goto restore_opts;
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc = off is passed in mount
	 * option. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			stop_gc_thread(sbi);
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
		if (err)
			goto restore_opts;
		need_stop_gc = true;
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);
		sync_inodes_sb(sb);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		f2fs_sync_fs(sb, 1);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop issuing the flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in mount option.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
		if (err)
			goto restore_gc;
	}
skip:
	/* Update the POSIXACL Flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	return 0;
restore_gc:
	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {
		stop_gc_thread(sbi);
	}
restore_opts:
	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	sbi->fault_info = ffi;
#endif
	return err;
}
static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.is_encrypted	= f2fs_encrypted_inode,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#else
static const struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted	= f2fs_encrypted_inode,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		iput(inode);
		return ERR_PTR(-ESTALE);
	}
	return inode;
}
static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    f2fs_nfs_get_inode);
}
static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry = f2fs_fh_to_dentry,
	.fh_to_parent = f2fs_fh_to_parent,
	.get_parent = f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;

	return result;
}
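/*
 * Worked example, assuming the usual 4KB-block layout where
 * DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS = 873 and
 * ADDRS_PER_BLOCK = NIDS_PER_BLOCK = 1018:
 *   873 + 2*1018 + 2*1018^2 + 1018^3 = 1,057,053,389 blocks,
 * i.e. a maximum file size of roughly 3.94 TiB.
 */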
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	lock_buffer(bh);
	if (super)
		memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	/* it's a rare case, so we can do FUA all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		int err = 0;
		char *res;

		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
			res = "internally";
		} else {
			err = __f2fs_commit_super(bh, NULL);
			res = err ? "failed" : "done";
		}
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			res, main_blkaddr,
			segment0_blkaddr +
				(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		if (err)
			return true;
	}
	return false;
}
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB\n",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB\n",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)\n",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			le32_to_cpu(raw_super->segment_count));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;

	return 0;
}
*sbi
)
1520 unsigned int total
, fsmeta
;
1521 struct f2fs_super_block
*raw_super
= F2FS_RAW_SUPER(sbi
);
1522 struct f2fs_checkpoint
*ckpt
= F2FS_CKPT(sbi
);
1523 unsigned int ovp_segments
, reserved_segments
;
1525 total
= le32_to_cpu(raw_super
->segment_count
);
1526 fsmeta
= le32_to_cpu(raw_super
->segment_count_ckpt
);
1527 fsmeta
+= le32_to_cpu(raw_super
->segment_count_sit
);
1528 fsmeta
+= le32_to_cpu(raw_super
->segment_count_nat
);
1529 fsmeta
+= le32_to_cpu(ckpt
->rsvd_segment_count
);
1530 fsmeta
+= le32_to_cpu(raw_super
->segment_count_ssa
);
1532 if (unlikely(fsmeta
>= total
))
1535 ovp_segments
= le32_to_cpu(ckpt
->overprov_segment_count
);
1536 reserved_segments
= le32_to_cpu(ckpt
->rsvd_segment_count
);
1538 if (unlikely(fsmeta
< F2FS_MIN_SEGMENTS
||
1539 ovp_segments
== 0 || reserved_segments
== 0)) {
1540 f2fs_msg(sbi
->sb
, KERN_ERR
,
1541 "Wrong layout: check mkfs.f2fs version");
1545 if (unlikely(f2fs_cp_error(sbi
))) {
1546 f2fs_msg(sbi
->sb
, KERN_ERR
, "A bug case: need to run fsck");
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;
	int i;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	atomic_set(&sbi->wb_sync_req, 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	mutex_init(&sbi->wio_mutex[NODE]);
	mutex_init(&sbi->wio_mutex[DATA]);
	spin_lock_init(&sbi->cp_lock);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	int err;

	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);
	if (err)
		return err;

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}
#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;
	unsigned int n = 0;
	int err = -EIO;

	if (!f2fs_sb_mounted_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES   4096

	zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),
			GFP_KERNEL);
	if (!zones)
		return -ENOMEM;

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,
					  zones, &nr_zones,
					  GFP_KERNEL);
		if (err)
			break;
		if (!nr_zones) {
			err = -EIO;
			break;
		}

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
			n++;
		}
	}

	kfree(zones);

	return err;
}
#endif
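/*
 * Note on the loop above: blkz_type keeps one type byte per zone (hence
 * nr_blkz bytes are allocated), and it is filled in batches of up to
 * F2FS_REPORT_NR_ZONES zones per blkdev_report_zones() call until the
 * whole device has been walked.
 */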
/*
 * Read the f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either is broken, we pass the
 * recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	int block;
	struct buffer_head *bh;
	struct f2fs_super_block *super;
	int err = 0;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);
	if (!super)
		return -ENOMEM;

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			err = -EIO;
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
			err = -EINVAL;
			brelse(bh);
			continue;
		}

		if (!*raw_super) {
			memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
							sizeof(*super));
			*valid_super_block = block;
			*raw_super = super;
		}
		brelse(bh);
	}

	/* Failed to read either of the superblocks */
	if (err < 0)
		*recovery = 1;

	/* No valid superblock */
	if (!*raw_super)
		kfree(super);
	else
		err = 0;

	return err;
}
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;
	int err;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);

	/* if we are in recovery path, skip writing valid superblock */
	if (recover || err)
		return err;

	/* write current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	if (!bh)
		return -EIO;
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
	brelse(bh);
	return err;
}
*sbi
)
1763 struct f2fs_super_block
*raw_super
= F2FS_RAW_SUPER(sbi
);
1764 unsigned int max_devices
= MAX_DEVICES
;
1767 /* Initialize single device information */
1768 if (!RDEV(0).path
[0]) {
1769 if (!bdev_is_zoned(sbi
->sb
->s_bdev
))
1775 * Initialize multiple devices information, or single
1776 * zoned block device information.
1778 sbi
->devs
= kcalloc(max_devices
, sizeof(struct f2fs_dev_info
),
1783 for (i
= 0; i
< max_devices
; i
++) {
1785 if (i
> 0 && !RDEV(i
).path
[0])
1788 if (max_devices
== 1) {
1789 /* Single zoned block device mount */
1791 blkdev_get_by_dev(sbi
->sb
->s_bdev
->bd_dev
,
1792 sbi
->sb
->s_mode
, sbi
->sb
->s_type
);
1794 /* Multi-device mount */
1795 memcpy(FDEV(i
).path
, RDEV(i
).path
, MAX_PATH_LEN
);
1796 FDEV(i
).total_segments
=
1797 le32_to_cpu(RDEV(i
).total_segments
);
1799 FDEV(i
).start_blk
= 0;
1800 FDEV(i
).end_blk
= FDEV(i
).start_blk
+
1801 (FDEV(i
).total_segments
<<
1802 sbi
->log_blocks_per_seg
) - 1 +
1803 le32_to_cpu(raw_super
->segment0_blkaddr
);
1805 FDEV(i
).start_blk
= FDEV(i
- 1).end_blk
+ 1;
1806 FDEV(i
).end_blk
= FDEV(i
).start_blk
+
1807 (FDEV(i
).total_segments
<<
1808 sbi
->log_blocks_per_seg
) - 1;
1810 FDEV(i
).bdev
= blkdev_get_by_path(FDEV(i
).path
,
1811 sbi
->sb
->s_mode
, sbi
->sb
->s_type
);
1813 if (IS_ERR(FDEV(i
).bdev
))
1814 return PTR_ERR(FDEV(i
).bdev
);
1816 /* to release errored devices */
1817 sbi
->s_ndevs
= i
+ 1;
1819 #ifdef CONFIG_BLK_DEV_ZONED
1820 if (bdev_zoned_model(FDEV(i
).bdev
) == BLK_ZONED_HM
&&
1821 !f2fs_sb_mounted_blkzoned(sbi
->sb
)) {
1822 f2fs_msg(sbi
->sb
, KERN_ERR
,
1823 "Zoned block device feature not enabled\n");
1826 if (bdev_zoned_model(FDEV(i
).bdev
) != BLK_ZONED_NONE
) {
1827 if (init_blkz_info(sbi
, i
)) {
1828 f2fs_msg(sbi
->sb
, KERN_ERR
,
1829 "Failed to initialize F2FS blkzone information");
1832 if (max_devices
== 1)
1834 f2fs_msg(sbi
->sb
, KERN_INFO
,
1835 "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
1837 FDEV(i
).total_segments
,
1838 FDEV(i
).start_blk
, FDEV(i
).end_blk
,
1839 bdev_zoned_model(FDEV(i
).bdev
) == BLK_ZONED_HA
?
1840 "Host-aware" : "Host-managed");
1844 f2fs_msg(sbi
->sb
, KERN_INFO
,
1845 "Mount Device [%2d]: %20s, %8u, %8x - %8x",
1847 FDEV(i
).total_segments
,
1848 FDEV(i
).start_blk
, FDEV(i
).end_blk
);
1850 f2fs_msg(sbi
->sb
, KERN_INFO
,
1851 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi
));
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	struct inode *root;
	int err;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

try_onemore:
	err = -EINVAL;
	raw_super = NULL;
	valid_super_block = -1;
	recovery = 0;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;

	sbi->sb = sb;

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;
		goto free_sbi;
	}

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");
		goto free_sbi;
	}

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,
								&recovery);
	if (err)
		goto free_sbi;

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sb)) {
		f2fs_msg(sb, KERN_ERR,
			 "Zoned block device support is not enabled\n");
		goto free_sb_buf;
	}
#endif
	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {
		err = -ENOMEM;
		goto free_sb_buf;
	}

	err = parse_options(sb, options);
	if (err)
		goto free_options;

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	init_rwsem(&sbi->read_io.io_rwsem);
	sbi->read_io.sbi = sbi;
	sbi->read_io.bio = NULL;
	for (i = 0; i < NR_PAGE_TYPE; i++) {
		init_rwsem(&sbi->write_io[i].io_rwsem);
		sbi->write_io[i].sbi = sbi;
		sbi->write_io[i].bio = NULL;
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);
	init_sb_info(sbi);

	err = init_percpu_info(sbi);
	if (err)
		goto free_options;

	if (F2FS_IO_SIZE(sbi) > 1) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy)
			goto free_options;
	}

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);
		goto free_io_dummy;
	}

	err = get_valid_checkpoint(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;
	}

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");
		goto free_devices;
	}

	sbi->total_valid_node_count =
				le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
				le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
				le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
		goto free_sm;
	}
	err = build_node_manager(sbi);
	if (err) {
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");
		goto free_nm;
	}

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if exists */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);
		goto free_nm;
	}

	f2fs_join_shrinker(sbi);

	err = f2fs_build_stats(sbi);
	if (err)
		goto free_nm;

	/* if there are any orphan inodes, free them */
	err = recover_orphan_inodes(sbi);
	if (err)
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
	if (IS_ERR(root)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	}
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		iput(root);
		err = -EINVAL;
		goto free_node_inode;
	}

	sb->s_root = d_make_root(root); /* allocate root dentry */
	if (!sb->s_root) {
		err = -ENOMEM;
		goto free_root_inode;
	}

	if (f2fs_proc_root)
		sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);

	if (sbi->s_proc) {
		proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_info_fops, sb);
		proc_create_data("segment_bits", S_IRUGO, sbi->s_proc,
				 &f2fs_seq_segment_bits_fops, sb);
	}

	sbi->s_kobj.kset = f2fs_kset;
	init_completion(&sbi->s_kobj_unregister);
	err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
							"%s", sb->s_id);
	if (err)
		goto free_proc;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * The mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
			err = -EROFS;
			goto free_kobj;
		}

		if (need_fsck)
			set_sbi_flag(sbi, SBI_NEED_FSCK);

		if (!retry)
			goto skip_recovery;

		err = recover_fsync_data(sbi, false);
		if (err < 0) {
			need_fsck = true;
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
			goto free_kobj;
		}
	} else {
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			err = -EINVAL;
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");
			goto free_kobj;
		}
	}
skip_recovery:
	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If filesystem is not mounted as read-only then
	 * do start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run background GC thread.*/
		err = start_gc_thread(sbi);
		if (err)
			goto free_kobj;
	}
	kfree(options);

	/* recover broken superblock */
	if (recovery) {
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);
	}

	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
				cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);
	return 0;

free_kobj:
	f2fs_sync_inode_meta(sbi);
	kobject_del(&sbi->s_kobj);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
free_proc:
	if (sbi->s_proc) {
		remove_proc_entry("segment_info", sbi->s_proc);
		remove_proc_entry("segment_bits", sbi->s_proc);
		remove_proc_entry(sb->s_id, f2fs_proc_root);
	}
free_root_inode:
	dput(sb->s_root);
	sb->s_root = NULL;
free_node_inode:
	truncate_inode_pages_final(NODE_MAPPING(sbi));
	mutex_lock(&sbi->umount_mutex);
	release_ino_entry(sbi, true);
	f2fs_leave_shrinker(sbi);
	/*
	 * Some dirty meta pages can be produced by recover_orphan_inodes()
	 * failed by EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
	f2fs_destroy_stats(sbi);
free_nm:
	destroy_node_manager(sbi);
free_sm:
	destroy_segment_manager(sbi);
free_devices:
	destroy_device_list(sbi);
	kfree(sbi->ckpt);
free_meta_inode:
	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);
free_io_dummy:
	mempool_destroy(sbi->write_io_dummy);
free_options:
	destroy_percpu_info(sbi);
	kfree(options);
free_sb_buf:
	kfree(raw_super);
free_sbi:
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi);

	/* give only one other chance */
	if (retry) {
		retry = false;
		shrink_dcache_sb(sb);
		goto try_onemore;
	}
	return err;
}
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}
static void kill_f2fs_super(struct super_block *sb)
{
	if (sb->s_root)
		set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "f2fs",
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	int err;

	f2fs_build_trace_ios();

	err = init_inodecache();
	if (err)
		goto fail;
	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
	if (!f2fs_kset) {
		err = -ENOMEM;
		goto free_extent_cache;
	}
	err = register_shrinker(&f2fs_shrinker_info);
	if (err)
		goto free_kset;

	err = register_filesystem(&f2fs_fs_type);
	if (err)
		goto free_shrinker;
	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;
	f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return 0;

free_filesystem:
	unregister_filesystem(&f2fs_fs_type);
free_shrinker:
	unregister_shrinker(&f2fs_shrinker_info);
free_kset:
	kset_unregister(f2fs_kset);
free_extent_cache:
	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();
free_inodecache:
	destroy_inodecache();
fail:
	return err;
}
static void __exit exit_f2fs_fs(void)
{
	remove_proc_entry("fs/f2fs", NULL);
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	kset_unregister(f2fs_kset);
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}
module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)
MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");