/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/quotaops.h>
#include <linux/f2fs_fs.h>
#include <linux/sysfs.h>

#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
static struct kmem_cache *f2fs_inode_cachep;
#ifdef CONFIG_F2FS_FAULT_INJECTION

char *fault_name[FAULT_MAX] = {
	[FAULT_KMALLOC]		= "kmalloc",
	[FAULT_PAGE_ALLOC]	= "page alloc",
	[FAULT_ALLOC_NID]	= "alloc nid",
	[FAULT_ORPHAN]		= "orphan",
	[FAULT_BLOCK]		= "no more block",
	[FAULT_DIR_DEPTH]	= "too big dir depth",
	[FAULT_EVICT_INODE]	= "evict_inode fail",
	[FAULT_TRUNCATE]	= "truncate fail",
	[FAULT_IO]		= "IO error",
	[FAULT_CHECKPOINT]	= "checkpoint error",
};
static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
						unsigned int rate)
{
	struct f2fs_fault_info *ffi = &sbi->fault_info;

	if (rate) {
		atomic_set(&ffi->inject_ops, 0);
		ffi->inject_rate = rate;
		ffi->inject_type = (1 << FAULT_MAX) - 1;
	} else {
		memset(ffi, 0, sizeof(struct f2fs_fault_info));
	}
}
#endif
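/*
 * Explanatory note on the knobs above (a sketch, not code from this file):
 * fault_injection=<rate> arms the injector so that roughly one out of every
 * <rate> eligible allocations/IOs is forced to fail, and inject_type is a
 * bitmap with one bit per FAULT_* slot in fault_name[] (all enabled here).
 * A hypothetical mount line exercising it would look like
 *
 *	mount -t f2fs -o fault_injection=1000 /dev/sdX /mnt
 *
 * The per-type strings in fault_name[] are only used for log messages.
 */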
/* f2fs-wide shrinker description */
static struct shrinker f2fs_shrinker_info = {
	.scan_objects = f2fs_shrink_scan,
	.count_objects = f2fs_shrink_count,
	.seeks = DEFAULT_SEEKS,
};
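/*
 * How this hooks into the kernel shrinker API (explanatory, not code from
 * this file): under memory pressure the MM core first calls
 * ->count_objects() to ask how many reclaimable objects f2fs is holding,
 * then calls ->scan_objects() to free up to the requested number of them.
 * register_shrinker(&f2fs_shrinker_info) in init_f2fs_fs() below wires the
 * callbacks up for every mounted f2fs instance on sbi->s_list.
 */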
	Opt_disable_roll_forward,
	Opt_disable_ext_identify,

static match_table_t f2fs_tokens = {
	{Opt_gc_background, "background_gc=%s"},
	{Opt_disable_roll_forward, "disable_roll_forward"},
	{Opt_norecovery, "norecovery"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_noheap, "no_heap"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_noacl, "noacl"},
	{Opt_active_logs, "active_logs=%u"},
	{Opt_disable_ext_identify, "disable_ext_identify"},
	{Opt_inline_xattr, "inline_xattr"},
	{Opt_noinline_xattr, "noinline_xattr"},
	{Opt_inline_data, "inline_data"},
	{Opt_inline_dentry, "inline_dentry"},
	{Opt_noinline_dentry, "noinline_dentry"},
	{Opt_flush_merge, "flush_merge"},
	{Opt_noflush_merge, "noflush_merge"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_fastboot, "fastboot"},
	{Opt_extent_cache, "extent_cache"},
	{Opt_noextent_cache, "noextent_cache"},
	{Opt_noinline_data, "noinline_data"},
	{Opt_data_flush, "data_flush"},
	{Opt_mode, "mode=%s"},
	{Opt_io_size_bits, "io_bits=%u"},
	{Opt_fault_injection, "fault_injection=%u"},
	{Opt_lazytime, "lazytime"},
	{Opt_nolazytime, "nolazytime"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_prjquota, "prjquota"},
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf);
	va_end(args);
}

static void init_once(void *foo)
{
	struct f2fs_inode_info *fi = (struct f2fs_inode_info *) foo;

	inode_init_once(&fi->vfs_inode);
}
static int parse_options(struct super_block *sb, char *options)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct request_queue *q;
	substring_t args[MAX_OPT_ARGS];

	while ((p = strsep(&options, ",")) != NULL) {
		/*
		 * Initialize args struct so we know whether arg was
		 * found; some options take optional arguments.
		 */
		args[0].to = args[0].from = NULL;
		token = match_token(p, f2fs_tokens, args);

		case Opt_gc_background:
			name = match_strdup(&args[0]);
			if (strlen(name) == 2 && !strncmp(name, "on", 2)) {
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 3 && !strncmp(name, "off", 3)) {
				clear_opt(sbi, BG_GC);
				clear_opt(sbi, FORCE_FG_GC);
			} else if (strlen(name) == 4 && !strncmp(name, "sync", 4)) {
				set_opt(sbi, FORCE_FG_GC);
		case Opt_disable_roll_forward:
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			/* this option mounts f2fs with ro */
			set_opt(sbi, DISABLE_ROLL_FORWARD);
			if (!f2fs_readonly(sb))
			q = bdev_get_queue(sb->s_bdev);
			if (blk_queue_discard(q)) {
				set_opt(sbi, DISCARD);
			} else if (!f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"mounting with \"discard\" option, but "
					"the device does not support discard");
			if (f2fs_sb_mounted_blkzoned(sb)) {
				f2fs_msg(sb, KERN_WARNING,
					"discard is required for zoned block devices");
			clear_opt(sbi, DISCARD);
			set_opt(sbi, NOHEAP);
			clear_opt(sbi, NOHEAP);
#ifdef CONFIG_F2FS_FS_XATTR
			set_opt(sbi, XATTR_USER);
		case Opt_nouser_xattr:
			clear_opt(sbi, XATTR_USER);
		case Opt_inline_xattr:
			set_opt(sbi, INLINE_XATTR);
		case Opt_noinline_xattr:
			clear_opt(sbi, INLINE_XATTR);
			f2fs_msg(sb, KERN_INFO,
				"user_xattr options not supported");
		case Opt_nouser_xattr:
			f2fs_msg(sb, KERN_INFO,
				"nouser_xattr options not supported");
		case Opt_inline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"inline_xattr options not supported");
		case Opt_noinline_xattr:
			f2fs_msg(sb, KERN_INFO,
				"noinline_xattr options not supported");
#ifdef CONFIG_F2FS_FS_POSIX_ACL
			set_opt(sbi, POSIX_ACL);
			clear_opt(sbi, POSIX_ACL);
			f2fs_msg(sb, KERN_INFO, "acl options not supported");
			f2fs_msg(sb, KERN_INFO, "noacl options not supported");
		case Opt_active_logs:
			if (args->from && match_int(args, &arg))
			if (arg != 2 && arg != 4 && arg != NR_CURSEG_TYPE)
			sbi->active_logs = arg;
		case Opt_disable_ext_identify:
			set_opt(sbi, DISABLE_EXT_IDENTIFY);
		case Opt_inline_data:
			set_opt(sbi, INLINE_DATA);
		case Opt_inline_dentry:
			set_opt(sbi, INLINE_DENTRY);
		case Opt_noinline_dentry:
			clear_opt(sbi, INLINE_DENTRY);
		case Opt_flush_merge:
			set_opt(sbi, FLUSH_MERGE);
		case Opt_noflush_merge:
			clear_opt(sbi, FLUSH_MERGE);
			set_opt(sbi, NOBARRIER);
			set_opt(sbi, FASTBOOT);
		case Opt_extent_cache:
			set_opt(sbi, EXTENT_CACHE);
		case Opt_noextent_cache:
			clear_opt(sbi, EXTENT_CACHE);
		case Opt_noinline_data:
			clear_opt(sbi, INLINE_DATA);
			set_opt(sbi, DATA_FLUSH);
			name = match_strdup(&args[0]);
			if (strlen(name) == 8 &&
					!strncmp(name, "adaptive", 8)) {
				if (f2fs_sb_mounted_blkzoned(sb)) {
					f2fs_msg(sb, KERN_WARNING,
						"adaptive mode is not allowed with "
						"zoned block device feature");
				set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
			} else if (strlen(name) == 3 &&
					!strncmp(name, "lfs", 3)) {
				set_opt_mode(sbi, F2FS_MOUNT_LFS);
		case Opt_io_size_bits:
			if (args->from && match_int(args, &arg))
			if (arg > __ilog2_u32(BIO_MAX_PAGES)) {
				f2fs_msg(sb, KERN_WARNING,
					"Not support %d, larger than %d",
					1 << arg, BIO_MAX_PAGES);
			sbi->write_io_size_bits = arg;
		case Opt_fault_injection:
			if (args->from && match_int(args, &arg))
#ifdef CONFIG_F2FS_FAULT_INJECTION
			f2fs_build_fault_attr(sbi, arg);
			set_opt(sbi, FAULT_INJECTION);
			f2fs_msg(sb, KERN_INFO,
				"FAULT_INJECTION was not selected");
			sb->s_flags |= MS_LAZYTIME;
			sb->s_flags &= ~MS_LAZYTIME;
			set_opt(sbi, USRQUOTA);
			set_opt(sbi, GRPQUOTA);
			set_opt(sbi, PRJQUOTA);
			f2fs_msg(sb, KERN_INFO,
				"quota operations not supported");
			f2fs_msg(sb, KERN_ERR,
				"Unrecognized mount option \"%s\" or missing value",

	if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
		f2fs_msg(sb, KERN_ERR,
			"Should set mode=lfs with %uKB-sized IO",
			F2FS_IO_SIZE_KB(sbi));
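/*
 * Illustrative only (not code from this file): a typical mount line that
 * combines the options recognized above might be
 *
 *	mount -t f2fs -o background_gc=on,discard,inline_data,active_logs=6 \
 *		/dev/sdX /mnt/f2fs
 *
 * Each comma-separated token is matched against f2fs_tokens[] and turned
 * into a set_opt()/clear_opt() bit in sbi->mount_opt, or into a numeric
 * field such as sbi->active_logs or sbi->write_io_size_bits.
 */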
static struct inode *f2fs_alloc_inode(struct super_block *sb)
{
	struct f2fs_inode_info *fi;

	fi = kmem_cache_alloc(f2fs_inode_cachep, GFP_F2FS_ZERO);
	if (!fi)
		return NULL;

	init_once((void *) fi);

	/* Initialize f2fs-specific inode info */
	fi->vfs_inode.i_version = 1;
	atomic_set(&fi->dirty_pages, 0);
	fi->i_current_depth = 1;
	init_rwsem(&fi->i_sem);
	INIT_LIST_HEAD(&fi->dirty_list);
	INIT_LIST_HEAD(&fi->gdirty_list);
	INIT_LIST_HEAD(&fi->inmem_pages);
	mutex_init(&fi->inmem_lock);
	init_rwsem(&fi->dio_rwsem[READ]);
	init_rwsem(&fi->dio_rwsem[WRITE]);
	init_rwsem(&fi->i_mmap_sem);

	memset(&fi->i_dquot, 0, sizeof(fi->i_dquot));
	fi->i_reserved_quota = 0;

	/* Will be used by directory only */
	fi->i_dir_level = F2FS_SB(sb)->dir_level;

	return &fi->vfs_inode;
}
static int f2fs_drop_inode(struct inode *inode)
{
	/*
	 * This is to avoid a deadlock condition like below.
	 * writeback_single_inode(inode)
	 *  - f2fs_write_data_page
	 *    - f2fs_gc -> iput -> evict
	 *       - inode_wait_for_writeback(inode)
	 */
	if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
		if (!inode->i_nlink && !is_bad_inode(inode)) {
			/* to avoid evict_inode being called simultaneously */
			atomic_inc(&inode->i_count);
			spin_unlock(&inode->i_lock);

			/* some remaining atomic pages should be discarded */
			if (f2fs_is_atomic_file(inode))
				drop_inmem_pages(inode);

			/* should retain fi->extent_tree for writepage */
			f2fs_destroy_extent_node(inode);

			sb_start_intwrite(inode->i_sb);
			f2fs_i_size_write(inode, 0);

			if (F2FS_HAS_BLOCKS(inode))
				f2fs_truncate(inode);

			sb_end_intwrite(inode->i_sb);

			fscrypt_put_encryption_info(inode, NULL);
			spin_lock(&inode->i_lock);
			atomic_dec(&inode->i_count);
		}
		trace_f2fs_drop_inode(inode, 0);
		return 0;
	}
	ret = generic_drop_inode(inode);
	trace_f2fs_drop_inode(inode, ret);
	return ret;
}
int f2fs_inode_dirtied(struct inode *inode, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
	set_inode_flag(inode, FI_DIRTY_INODE);
	stat_inc_dirty_inode(sbi, DIRTY_META);

	if (sync && list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_add_tail(&F2FS_I(inode)->gdirty_list,
				&sbi->inode_list[DIRTY_META]);
		inc_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
void f2fs_inode_synced(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		return;
	}
	if (!list_empty(&F2FS_I(inode)->gdirty_list)) {
		list_del_init(&F2FS_I(inode)->gdirty_list);
		dec_page_count(sbi, F2FS_DIRTY_IMETA);
	}
	clear_inode_flag(inode, FI_DIRTY_INODE);
	clear_inode_flag(inode, FI_AUTO_RECOVER);
	stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);
}
/*
 * f2fs_dirty_inode() is called from __mark_inode_dirty().
 *
 * We should call set_dirty_inode to write the dirty inode through write_inode.
 */
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return;

	if (flags == I_DIRTY_TIME)
		return;

	if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
		clear_inode_flag(inode, FI_AUTO_RECOVER);

	f2fs_inode_dirtied(inode, false);
}
static void f2fs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(f2fs_inode_cachep, F2FS_I(inode));
}

static void f2fs_destroy_inode(struct inode *inode)
{
	call_rcu(&inode->i_rcu, f2fs_i_callback);
}
static void destroy_percpu_info(struct f2fs_sb_info *sbi)
{
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
	percpu_counter_destroy(&sbi->total_valid_inode_count);
}
static void destroy_device_list(struct f2fs_sb_info *sbi)
{
	for (i = 0; i < sbi->s_ndevs; i++) {
		blkdev_put(FDEV(i).bdev, FMODE_EXCL);
#ifdef CONFIG_BLK_DEV_ZONED
		kfree(FDEV(i).blkz_type);
static void f2fs_quota_off_umount(struct super_block *sb);

static void f2fs_put_super(struct super_block *sb)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	f2fs_quota_off_umount(sb);

	/* prevent remaining shrinker jobs */
	mutex_lock(&sbi->umount_mutex);

	/*
	 * We don't need to do a checkpoint when the superblock is clean.
	 * But if the previous checkpoint was not done by umount, we need
	 * to do a clean checkpoint again.
	 */
	if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
			!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
		struct cp_control cpc = {
		write_checkpoint(sbi, &cpc);

	/* be sure to wait for any on-going discard commands */
	f2fs_wait_discard_bios(sbi);

	if (f2fs_discard_en(sbi) && !sbi->discard_blks) {
		struct cp_control cpc = {
			.reason = CP_UMOUNT | CP_TRIMMED,
		write_checkpoint(sbi, &cpc);

	/* write_checkpoint can update stat information */
	f2fs_destroy_stats(sbi);

	/*
	 * Normally the superblock is clean, so we need to release this.
	 * In addition, EIO will skip the checkpoint, so we need this as well.
	 */
	release_ino_entry(sbi, true);

	f2fs_leave_shrinker(sbi);
	mutex_unlock(&sbi->umount_mutex);

	/* in our cp_error case, we can wait for any writeback page */
	f2fs_flush_merged_writes(sbi);

	iput(sbi->node_inode);
	iput(sbi->meta_inode);

	/* destroy f2fs internal modules */
	destroy_node_manager(sbi);
	destroy_segment_manager(sbi);

	f2fs_exit_sysfs(sbi);

	sb->s_fs_info = NULL;
	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);
	kfree(sbi->raw_super);

	destroy_device_list(sbi);
	mempool_destroy(sbi->write_io_dummy);
	destroy_percpu_info(sbi);
	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
int f2fs_sync_fs(struct super_block *sb, int sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);

	trace_f2fs_sync_fs(sb, sync);

		struct cp_control cpc;

		cpc.reason = __get_cp_reason(sbi);

		mutex_lock(&sbi->gc_mutex);
		err = write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);

	f2fs_trace_ios(NULL, 1);
static int f2fs_freeze(struct super_block *sb)
{
	if (f2fs_readonly(sb))
		return 0;

	/* IO error happened before */
	if (unlikely(f2fs_cp_error(F2FS_SB(sb))))
		return -EIO;

	/* must be clean, since sync_filesystem() was already called */
	if (is_sbi_flag_set(F2FS_SB(sb), SBI_IS_DIRTY))
		return -EINVAL;
	return 0;
}

static int f2fs_unfreeze(struct super_block *sb)
{
	return 0;
}
static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
	block_t total_count, user_block_count, start_count, ovp_count;
	u64 avail_node_count;

	total_count = le64_to_cpu(sbi->raw_super->block_count);
	user_block_count = sbi->user_block_count;
	start_count = le32_to_cpu(sbi->raw_super->segment0_blkaddr);
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
	buf->f_type = F2FS_SUPER_MAGIC;
	buf->f_bsize = sbi->blocksize;

	buf->f_blocks = total_count - start_count;
	buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
	buf->f_bavail = user_block_count - valid_user_blocks(sbi) -
						sbi->reserved_blocks;

	avail_node_count = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;

	if (avail_node_count > user_block_count) {
		buf->f_files = user_block_count;
		buf->f_ffree = buf->f_bavail;
	} else {
		buf->f_files = avail_node_count;
		buf->f_ffree = min(avail_node_count - valid_node_count(sbi),
					buf->f_bavail);
	}

	buf->f_namelen = F2FS_NAME_LEN;
	buf->f_fsid.val[0] = (u32)id;
	buf->f_fsid.val[1] = (u32)(id >> 32);
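/*
 * Reading the numbers above (explanatory note): f_blocks is every block
 * past the superblock area, f_bfree additionally counts the overprovision
 * segments (reclaimable by the filesystem even though not user-visible),
 * and f_bavail excludes both the overprovision area and any blocks held
 * back in sbi->reserved_blocks.  So f_bavail <= f_bfree <= f_blocks, which
 * is what `df` and `df -i` end up reporting to userspace.
 */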
static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
{
	struct f2fs_sb_info *sbi = F2FS_SB(root->d_sb);

	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, BG_GC)) {
		if (test_opt(sbi, FORCE_FG_GC))
			seq_printf(seq, ",background_gc=%s", "sync");
		else
			seq_printf(seq, ",background_gc=%s", "on");
	} else {
		seq_printf(seq, ",background_gc=%s", "off");
	}
	if (test_opt(sbi, DISABLE_ROLL_FORWARD))
		seq_puts(seq, ",disable_roll_forward");
	if (test_opt(sbi, DISCARD))
		seq_puts(seq, ",discard");
	if (test_opt(sbi, NOHEAP))
		seq_puts(seq, ",no_heap");
	else
		seq_puts(seq, ",heap");
#ifdef CONFIG_F2FS_FS_XATTR
	if (test_opt(sbi, XATTR_USER))
		seq_puts(seq, ",user_xattr");
	else
		seq_puts(seq, ",nouser_xattr");
	if (test_opt(sbi, INLINE_XATTR))
		seq_puts(seq, ",inline_xattr");
	else
		seq_puts(seq, ",noinline_xattr");
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	if (test_opt(sbi, POSIX_ACL))
		seq_puts(seq, ",acl");
	else
		seq_puts(seq, ",noacl");
	if (test_opt(sbi, DISABLE_EXT_IDENTIFY))
		seq_puts(seq, ",disable_ext_identify");
	if (test_opt(sbi, INLINE_DATA))
		seq_puts(seq, ",inline_data");
	else
		seq_puts(seq, ",noinline_data");
	if (test_opt(sbi, INLINE_DENTRY))
		seq_puts(seq, ",inline_dentry");
	else
		seq_puts(seq, ",noinline_dentry");
	if (!f2fs_readonly(sbi->sb) && test_opt(sbi, FLUSH_MERGE))
		seq_puts(seq, ",flush_merge");
	if (test_opt(sbi, NOBARRIER))
		seq_puts(seq, ",nobarrier");
	if (test_opt(sbi, FASTBOOT))
		seq_puts(seq, ",fastboot");
	if (test_opt(sbi, EXTENT_CACHE))
		seq_puts(seq, ",extent_cache");
	else
		seq_puts(seq, ",noextent_cache");
	if (test_opt(sbi, DATA_FLUSH))
		seq_puts(seq, ",data_flush");

	seq_puts(seq, ",mode=");
	if (test_opt(sbi, ADAPTIVE))
		seq_puts(seq, "adaptive");
	else if (test_opt(sbi, LFS))
		seq_puts(seq, "lfs");
	seq_printf(seq, ",active_logs=%u", sbi->active_logs);
	if (F2FS_IO_SIZE_BITS(sbi))
		seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (test_opt(sbi, FAULT_INJECTION))
		seq_printf(seq, ",fault_injection=%u",
				sbi->fault_info.inject_rate);
	if (test_opt(sbi, USRQUOTA))
		seq_puts(seq, ",usrquota");
	if (test_opt(sbi, GRPQUOTA))
		seq_puts(seq, ",grpquota");
	if (test_opt(sbi, PRJQUOTA))
		seq_puts(seq, ",prjquota");
static void default_options(struct f2fs_sb_info *sbi)
{
	/* init some FS parameters */
	sbi->active_logs = NR_CURSEG_TYPE;

	set_opt(sbi, INLINE_XATTR);
	set_opt(sbi, INLINE_DATA);
	set_opt(sbi, INLINE_DENTRY);
	set_opt(sbi, EXTENT_CACHE);
	set_opt(sbi, NOHEAP);
	sbi->sb->s_flags |= MS_LAZYTIME;
	set_opt(sbi, FLUSH_MERGE);
	if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
		set_opt_mode(sbi, F2FS_MOUNT_LFS);
		set_opt(sbi, DISCARD);
	} else {
		set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
	}

#ifdef CONFIG_F2FS_FS_XATTR
	set_opt(sbi, XATTR_USER);
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
	set_opt(sbi, POSIX_ACL);
#endif

#ifdef CONFIG_F2FS_FAULT_INJECTION
	f2fs_build_fault_attr(sbi, 0);
#endif
}
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct f2fs_mount_info org_mount_opt;
	unsigned long old_sb_flags;
	int err, active_logs;
	bool need_restart_gc = false;
	bool need_stop_gc = false;
	bool no_extent_cache = !test_opt(sbi, EXTENT_CACHE);
#ifdef CONFIG_F2FS_FAULT_INJECTION
	struct f2fs_fault_info ffi = sbi->fault_info;
#endif

	/*
	 * Save the old mount options in case we
	 * need to restore them.
	 */
	org_mount_opt = sbi->mount_opt;
	old_sb_flags = sb->s_flags;
	active_logs = sbi->active_logs;

	/* recover superblocks we couldn't write due to previous RO mount */
	if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
		err = f2fs_commit_super(sbi, false);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover all the superblocks, ret: %d", err);
		clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
	}

	default_options(sbi);

	/* parse mount options */
	err = parse_options(sb, data);

	/*
	 * Previous and new state of filesystem is RO,
	 * so skip checking GC and FLUSH_MERGE conditions.
	 */
	if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
		goto skip;

	if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
		err = dquot_suspend(sb, -1);
	} else {
		/* dquot_resume needs RW */
		sb->s_flags &= ~MS_RDONLY;
		dquot_resume(sb, -1);
	}

	/* disallow enabling/disabling extent_cache dynamically */
	if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"switching the extent_cache option is not allowed");
	}

	/*
	 * We stop the GC thread if FS is mounted as RO
	 * or if background_gc=off is passed in the mount
	 * options. Also sync the filesystem.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
		if (sbi->gc_thread) {
			need_restart_gc = true;
		}
	} else if (!sbi->gc_thread) {
		err = start_gc_thread(sbi);
	}

	if (*flags & MS_RDONLY) {
		writeback_inodes_sb(sb, WB_REASON_SYNC);

		set_sbi_flag(sbi, SBI_IS_DIRTY);
		set_sbi_flag(sbi, SBI_IS_CLOSE);
		clear_sbi_flag(sbi, SBI_IS_CLOSE);
	}

	/*
	 * We stop the issue-flush thread if FS is mounted as RO
	 * or if flush_merge is not passed in the mount options.
	 */
	if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
		clear_opt(sbi, FLUSH_MERGE);
		destroy_flush_cmd_control(sbi, false);
	} else {
		err = create_flush_cmd_control(sbi);
	}

	/* Update the POSIXACL flag */
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);

	if (need_restart_gc) {
		if (start_gc_thread(sbi))
			f2fs_msg(sbi->sb, KERN_WARNING,
				"background gc thread has stopped");
	} else if (need_stop_gc) {

	sbi->mount_opt = org_mount_opt;
	sbi->active_logs = active_logs;
	sb->s_flags = old_sb_flags;
#ifdef CONFIG_F2FS_FAULT_INJECTION
	sbi->fault_info = ffi;
#endif
/* Read data from quotafile */
static ssize_t f2fs_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	block_t blkidx = F2FS_BYTES_TO_BLK(off);
	int offset = off & (sb->s_blocksize - 1);
	loff_t i_size = i_size_read(inode);

	if (off + len > i_size)
		len = i_size - off;

	while (toread > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset, toread);

		page = read_mapping_page(mapping, blkidx, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);

		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);

		kaddr = kmap_atomic(page);
		memcpy(data, kaddr + offset, tocopy);
		kunmap_atomic(kaddr);
		f2fs_put_page(page, 1);
/* Write to quotafile */
static ssize_t f2fs_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	struct address_space *mapping = inode->i_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	int offset = off & (sb->s_blocksize - 1);
	size_t towrite = len;

	while (towrite > 0) {
		tocopy = min_t(unsigned long, sb->s_blocksize - offset,
								towrite);
		err = a_ops->write_begin(NULL, mapping, off, tocopy, 0,

		kaddr = kmap_atomic(page);
		memcpy(kaddr + offset, data, tocopy);
		kunmap_atomic(kaddr);
		flush_dcache_page(page);

		a_ops->write_end(NULL, mapping, off, tocopy, tocopy,

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return len - towrite;
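/*
 * Note on the return convention above (explanatory, not original code):
 * like the generic quota helpers, f2fs_quota_write() reports how many
 * bytes actually reached the page cache (len - towrite), so a short
 * write after a failed write_begin() is visible to the quota layer
 * instead of being silently ignored.  The timestamp update and the
 * mark-dirty keep the quota file's inode metadata consistent with the
 * data just written.
 */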
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
	return F2FS_I(inode)->i_dquot;
}

static qsize_t *f2fs_get_reserved_space(struct inode *inode)
{
	return &F2FS_I(inode)->i_reserved_quota;
}
static int f2fs_quota_sync(struct super_block *sb, int type)
{
	struct quota_info *dqopt = sb_dqopt(sb);

	ret = dquot_writeback_dquots(sb, type);

	/*
	 * Now when everything is written we can discard the pagecache so
	 * that userspace sees the changes.
	 */
	for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
		if (type != -1 && cnt != type)
			continue;
		if (!sb_has_quota_active(sb, cnt))
			continue;

		ret = filemap_write_and_wait(dqopt->files[cnt]->i_mapping);

		inode_lock(dqopt->files[cnt]);
		truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
		inode_unlock(dqopt->files[cnt]);
	}
static int f2fs_quota_on(struct super_block *sb, int type, int format_id,
							const struct path *path)
{
	struct inode *inode;

	err = f2fs_quota_sync(sb, -1);

	err = dquot_quota_on(sb, type, format_id, path);

	inode = d_inode(path->dentry);

	F2FS_I(inode)->i_flags |= FS_NOATIME_FL | FS_IMMUTABLE_FL;
	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
			S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
static int f2fs_quota_off(struct super_block *sb, int type)
{
	struct inode *inode = sb_dqopt(sb)->files[type];

	if (!inode || !igrab(inode))
		return dquot_quota_off(sb, type);

	f2fs_quota_sync(sb, -1);

	err = dquot_quota_off(sb, type);

	F2FS_I(inode)->i_flags &= ~(FS_NOATIME_FL | FS_IMMUTABLE_FL);
	inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
	inode_unlock(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
static void f2fs_quota_off_umount(struct super_block *sb)
{
	for (type = 0; type < MAXQUOTAS; type++)
		f2fs_quota_off(sb, type);
}

int f2fs_get_projid(struct inode *inode, kprojid_t *projid)
{
	*projid = F2FS_I(inode)->i_projid;
	return 0;
}
static const struct dquot_operations f2fs_quota_operations = {
	.get_reserved_space	= f2fs_get_reserved_space,
	.write_dquot		= dquot_commit,
	.acquire_dquot		= dquot_acquire,
	.release_dquot		= dquot_release,
	.mark_dirty		= dquot_mark_dquot_dirty,
	.write_info		= dquot_commit_info,
	.alloc_dquot		= dquot_alloc,
	.destroy_dquot		= dquot_destroy,
	.get_projid		= f2fs_get_projid,
	.get_next_id		= dquot_get_next_id,
};

static const struct quotactl_ops f2fs_quotactl_ops = {
	.quota_on	= f2fs_quota_on,
	.quota_off	= f2fs_quota_off,
	.quota_sync	= f2fs_quota_sync,
	.get_state	= dquot_get_state,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk,
	.get_nextdqblk	= dquot_get_next_dqblk,
};

static inline void f2fs_quota_off_umount(struct super_block *sb)
{
}
static struct super_operations f2fs_sops = {
	.alloc_inode	= f2fs_alloc_inode,
	.drop_inode	= f2fs_drop_inode,
	.destroy_inode	= f2fs_destroy_inode,
	.write_inode	= f2fs_write_inode,
	.dirty_inode	= f2fs_dirty_inode,
	.show_options	= f2fs_show_options,
	.quota_read	= f2fs_quota_read,
	.quota_write	= f2fs_quota_write,
	.get_dquots	= f2fs_get_dquots,
	.evict_inode	= f2fs_evict_inode,
	.put_super	= f2fs_put_super,
	.sync_fs	= f2fs_sync_fs,
	.freeze_fs	= f2fs_freeze,
	.unfreeze_fs	= f2fs_unfreeze,
	.statfs		= f2fs_statfs,
	.remount_fs	= f2fs_remount,
};
#ifdef CONFIG_F2FS_FS_ENCRYPTION
static int f2fs_get_context(struct inode *inode, void *ctx, size_t len)
{
	return f2fs_getxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, NULL);
}

static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len,
							void *fs_data)
{
	return f2fs_setxattr(inode, F2FS_XATTR_INDEX_ENCRYPTION,
				F2FS_XATTR_NAME_ENCRYPTION_CONTEXT,
				ctx, len, fs_data, XATTR_CREATE);
}

static unsigned f2fs_max_namelen(struct inode *inode)
{
	return S_ISLNK(inode->i_mode) ?
			inode->i_sb->s_blocksize : F2FS_NAME_LEN;
}

static const struct fscrypt_operations f2fs_cryptops = {
	.key_prefix	= "f2fs:",
	.get_context	= f2fs_get_context,
	.set_context	= f2fs_set_context,
	.is_encrypted	= f2fs_encrypted_inode,
	.empty_dir	= f2fs_empty_dir,
	.max_namelen	= f2fs_max_namelen,
};
#else
static const struct fscrypt_operations f2fs_cryptops = {
	.is_encrypted	= f2fs_encrypted_inode,
};
#endif
static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
		u64 ino, u32 generation)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;

	if (check_nid_range(sbi, ino))
		return ERR_PTR(-ESTALE);

	/*
	 * f2fs_iget isn't quite right if the inode is currently unallocated!
	 * However f2fs_iget currently does appropriate checks to handle stale
	 * inodes so everything is OK.
	 */
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	if (unlikely(generation && inode->i_generation != generation)) {
		/* we didn't find the right inode.. */
		return ERR_PTR(-ESTALE);
	}

static struct dentry *f2fs_fh_to_dentry(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static struct dentry *f2fs_fh_to_parent(struct super_block *sb, struct fid *fid,
		int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
						f2fs_nfs_get_inode);
}

static const struct export_operations f2fs_export_ops = {
	.fh_to_dentry	= f2fs_fh_to_dentry,
	.fh_to_parent	= f2fs_fh_to_parent,
	.get_parent	= f2fs_get_parent,
};
static loff_t max_file_blocks(void)
{
	loff_t leaf_count = ADDRS_PER_BLOCK;

	/*
	 * note: previously, result was equal to (DEF_ADDRS_PER_INODE -
	 * F2FS_INLINE_XATTR_ADDRS), but now f2fs tries to reserve more
	 * space in inode.i_addr, so it is safer to reassign
	 * result as zero.
	 */

	/* two direct node blocks */
	result += (leaf_count * 2);

	/* two indirect node blocks */
	leaf_count *= NIDS_PER_BLOCK;
	result += (leaf_count * 2);

	/* one double indirect node block */
	leaf_count *= NIDS_PER_BLOCK;
	result += leaf_count;
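/*
 * Rough arithmetic behind max_file_blocks(), assuming the usual 4KB block
 * geometry where ADDRS_PER_BLOCK and NIDS_PER_BLOCK are both 1018 (stated
 * here only for illustration):
 *
 *	two direct node blocks:		2 * 1018          blocks
 *	two indirect node blocks:	2 * 1018 * 1018   blocks
 *	one double indirect block:	    1018^3        blocks
 *
 * i.e. roughly 1.06e9 addressable 4KB blocks, close to 4TB of file data
 * per inode, before counting the pointers stored in the inode itself.
 */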
static int __f2fs_commit_super(struct buffer_head *bh,
			struct f2fs_super_block *super)
{
	memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super));
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);

	/* it's a rare case, we can do FUA all the time */
	return __sync_dirty_buffer(bh, REQ_SYNC | REQ_PREFLUSH | REQ_FUA);
}
static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
					struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
	u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
	u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
	u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
	u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
	u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
	u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
	u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	u32 segment_count = le32_to_cpu(raw_super->segment_count);
	u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	u64 main_end_blkaddr = main_blkaddr +
				(segment_count_main << log_blocks_per_seg);
	u64 seg_end_blkaddr = segment0_blkaddr +
				(segment_count << log_blocks_per_seg);

	if (segment0_blkaddr != cp_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Mismatch start address, segment0(%u) cp_blkaddr(%u)",
			segment0_blkaddr, cp_blkaddr);
		return true;
	}

	if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
							sit_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong CP boundary, start(%u) end(%u) blocks(%u)",
			cp_blkaddr, sit_blkaddr,
			segment_count_ckpt << log_blocks_per_seg);
		return true;
	}

	if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
							nat_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
			sit_blkaddr, nat_blkaddr,
			segment_count_sit << log_blocks_per_seg);
		return true;
	}

	if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
							ssa_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
			nat_blkaddr, ssa_blkaddr,
			segment_count_nat << log_blocks_per_seg);
		return true;
	}

	if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
							main_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
			ssa_blkaddr, main_blkaddr,
			segment_count_ssa << log_blocks_per_seg);
		return true;
	}

	if (main_end_blkaddr > seg_end_blkaddr) {
		f2fs_msg(sb, KERN_INFO,
			"Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
			(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
		return true;
	} else if (main_end_blkaddr < seg_end_blkaddr) {
		/* fix in-memory information all the time */
		raw_super->segment_count = cpu_to_le32((main_end_blkaddr -
				segment0_blkaddr) >> log_blocks_per_seg);

		if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) {
			set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		}
		err = __f2fs_commit_super(bh, NULL);
		res = err ? "failed" : "done";
		f2fs_msg(sb, KERN_INFO,
			"Fix alignment : %s, start(%u) end(%u) block(%u)",
			(segment_count << log_blocks_per_seg),
			segment_count_main << log_blocks_per_seg);
	}
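/*
 * What the checks above pin down (explanatory note): the metadata areas
 * written by mkfs.f2fs must follow each other with no gaps, in the order
 *
 *	superblock | CP | SIT | NAT | SSA | MAIN
 *
 * each sized in segments of (1 << log_blocks_per_seg) blocks, which the
 * raw-super check below fixes at 512 x 4KB = 2MB.  Any mismatch between
 * an area's start address and the end of the previous area means the
 * superblock is corrupted or was produced by an incompatible tool.
 */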
static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
{
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
	struct super_block *sb = sbi->sb;
	unsigned int blocksize;

	if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
		f2fs_msg(sb, KERN_INFO,
			"Magic Mismatch, valid(0x%x) - read(0x%x)",
			F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
		return 1;
	}

	/* Currently, support only 4KB page cache size */
	if (F2FS_BLKSIZE != PAGE_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid page_cache_size (%lu), supports only 4KB\n",
			PAGE_SIZE);
		return 1;
	}

	/* Currently, support only 4KB block size */
	blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
	if (blocksize != F2FS_BLKSIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid blocksize (%u), supports only 4KB\n",
			blocksize);
		return 1;
	}

	/* check log blocks per segment */
	if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log blocks per segment (%u)\n",
			le32_to_cpu(raw_super->log_blocks_per_seg));
		return 1;
	}

	/* Currently, support 512/1024/2048/4096 bytes sector size */
	if (le32_to_cpu(raw_super->log_sectorsize) >
				F2FS_MAX_LOG_SECTOR_SIZE ||
		le32_to_cpu(raw_super->log_sectorsize) <
				F2FS_MIN_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)",
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}
	if (le32_to_cpu(raw_super->log_sectors_per_block) +
		le32_to_cpu(raw_super->log_sectorsize) !=
			F2FS_MAX_LOG_SECTOR_SIZE) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid log sectors per block(%u) log sectorsize(%u)",
			le32_to_cpu(raw_super->log_sectors_per_block),
			le32_to_cpu(raw_super->log_sectorsize));
		return 1;
	}

	/* check reserved ino info */
	if (le32_to_cpu(raw_super->node_ino) != 1 ||
		le32_to_cpu(raw_super->meta_ino) != 2 ||
		le32_to_cpu(raw_super->root_ino) != 3) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
			le32_to_cpu(raw_super->node_ino),
			le32_to_cpu(raw_super->meta_ino),
			le32_to_cpu(raw_super->root_ino));
		return 1;
	}

	if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
		f2fs_msg(sb, KERN_INFO,
			"Invalid segment count (%u)",
			le32_to_cpu(raw_super->segment_count));
		return 1;
	}

	/* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
	if (sanity_check_area_boundary(sbi, bh))
		return 1;
int sanity_check_ckpt(struct f2fs_sb_info *sbi)
{
	unsigned int total, fsmeta;
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned int ovp_segments, reserved_segments;
	unsigned int main_segs, blocks_per_seg;

	total = le32_to_cpu(raw_super->segment_count);
	fsmeta = le32_to_cpu(raw_super->segment_count_ckpt);
	fsmeta += le32_to_cpu(raw_super->segment_count_sit);
	fsmeta += le32_to_cpu(raw_super->segment_count_nat);
	fsmeta += le32_to_cpu(ckpt->rsvd_segment_count);
	fsmeta += le32_to_cpu(raw_super->segment_count_ssa);

	if (unlikely(fsmeta >= total))
		return 1;

	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_msg(sbi->sb, KERN_ERR,
			"Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	main_segs = le32_to_cpu(raw_super->segment_count_main);
	blocks_per_seg = sbi->blocks_per_seg;

	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
	}

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
		return 1;
	}
static void init_sb_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = sbi->raw_super;

	sbi->log_sectors_per_block =
		le32_to_cpu(raw_super->log_sectors_per_block);
	sbi->log_blocksize = le32_to_cpu(raw_super->log_blocksize);
	sbi->blocksize = 1 << sbi->log_blocksize;
	sbi->log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	sbi->blocks_per_seg = 1 << sbi->log_blocks_per_seg;
	sbi->segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	sbi->secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	sbi->total_sections = le32_to_cpu(raw_super->section_count);
	sbi->total_node_count =
		(le32_to_cpu(raw_super->segment_count_nat) / 2)
			* sbi->blocks_per_seg * NAT_ENTRY_PER_BLOCK;
	sbi->root_ino_num = le32_to_cpu(raw_super->root_ino);
	sbi->node_ino_num = le32_to_cpu(raw_super->node_ino);
	sbi->meta_ino_num = le32_to_cpu(raw_super->meta_ino);
	sbi->cur_victim_sec = NULL_SECNO;
	sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;

	sbi->dir_level = DEF_DIR_LEVEL;
	sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
	sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL;
	clear_sbi_flag(sbi, SBI_NEED_FSCK);

	for (i = 0; i < NR_COUNT_TYPE; i++)
		atomic_set(&sbi->nr_pages[i], 0);

	atomic_set(&sbi->wb_sync_req, 0);

	INIT_LIST_HEAD(&sbi->s_list);
	mutex_init(&sbi->umount_mutex);
	for (i = 0; i < NR_PAGE_TYPE - 1; i++)
		for (j = HOT; j < NR_TEMP_TYPE; j++)
			mutex_init(&sbi->wio_mutex[i][j]);
	spin_lock_init(&sbi->cp_lock);
}
static int init_percpu_info(struct f2fs_sb_info *sbi)
{
	err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL);

	return percpu_counter_init(&sbi->total_valid_inode_count, 0,
								GFP_KERNEL);
}
#ifdef CONFIG_BLK_DEV_ZONED
static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
{
	struct block_device *bdev = FDEV(devi).bdev;
	sector_t nr_sectors = bdev->bd_part->nr_sects;
	sector_t sector = 0;
	struct blk_zone *zones;
	unsigned int i, nr_zones;

	if (!f2fs_sb_mounted_blkzoned(sbi->sb))
		return 0;

	if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
				SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
		return -EINVAL;
	sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
	if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
				__ilog2_u32(sbi->blocks_per_blkz))
		return -EINVAL;
	sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
	FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
					sbi->log_blocks_per_blkz;
	if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
		FDEV(devi).nr_blkz++;

	FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
	if (!FDEV(devi).blkz_type)
		return -ENOMEM;

#define F2FS_REPORT_NR_ZONES	4096

	zones = kcalloc(F2FS_REPORT_NR_ZONES, sizeof(struct blk_zone),

	/* Get block zones type */
	while (zones && sector < nr_sectors) {

		nr_zones = F2FS_REPORT_NR_ZONES;
		err = blkdev_report_zones(bdev, sector,

		for (i = 0; i < nr_zones; i++) {
			FDEV(devi).blkz_type[n] = zones[i].type;
			sector += zones[i].len;
		}
	}
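/*
 * Bookkeeping sketch for the zoned-device path above (explanatory note):
 * every zone of the block device spans SECTOR_TO_BLOCK(bdev_zone_sectors())
 * f2fs blocks, and blkz_type[] keeps one byte per zone recording whether it
 * is conventional or sequential-write-required, as reported by
 * blkdev_report_zones().  The allocator consults this information later so
 * that LFS-mode writes stay strictly sequential inside sequential zones.
 */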
/*
 * Read the f2fs raw super block.
 * Because we have two copies of the super block, read both of them
 * to get the first valid one. If either of them is broken, we pass
 * the recovery flag back to the caller.
 */
static int read_raw_super_block(struct f2fs_sb_info *sbi,
			struct f2fs_super_block **raw_super,
			int *valid_super_block, int *recovery)
{
	struct super_block *sb = sbi->sb;
	struct buffer_head *bh;
	struct f2fs_super_block *super;

	super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL);

	for (block = 0; block < 2; block++) {
		bh = sb_bread(sb, block);
		if (!bh) {
			f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock",
				block + 1);
			continue;
		}

		/* sanity checking of raw super */
		if (sanity_check_raw_super(sbi, bh)) {
			f2fs_msg(sb, KERN_ERR,
				"Can't find valid F2FS filesystem in %dth superblock",
				block + 1);
		}

		memcpy(super, bh->b_data + F2FS_SUPER_OFFSET,
			sizeof(*super));
		*valid_super_block = block;
	}

	/* Failed to read any one of the superblocks */

	/* No valid superblock */
int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover)
{
	struct buffer_head *bh;

	if ((recover && f2fs_readonly(sbi->sb)) ||
				bdev_read_only(sbi->sb->s_bdev)) {
		set_sbi_flag(sbi, SBI_NEED_SB_WRITE);
		return -EROFS;
	}

	/* write the back-up superblock first */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0 : 1);
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));

	/* if we are in the recovery path, skip writing the valid superblock */

	/* write the current valid superblock */
	bh = sb_getblk(sbi->sb, sbi->valid_super_block);
	err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi));
static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	unsigned int max_devices = MAX_DEVICES;

	/* Initialize single device information */
	if (!RDEV(0).path[0]) {
		if (!bdev_is_zoned(sbi->sb->s_bdev))

	/*
	 * Initialize multiple devices information, or single
	 * zoned block device information.
	 */
	sbi->devs = kcalloc(max_devices, sizeof(struct f2fs_dev_info),

	for (i = 0; i < max_devices; i++) {

		if (i > 0 && !RDEV(i).path[0])

		if (max_devices == 1) {
			/* Single zoned block device mount */
				blkdev_get_by_dev(sbi->sb->s_bdev->bd_dev,
					sbi->sb->s_mode, sbi->sb->s_type);

			/* Multi-device mount */
			memcpy(FDEV(i).path, RDEV(i).path, MAX_PATH_LEN);
			FDEV(i).total_segments =
				le32_to_cpu(RDEV(i).total_segments);

			FDEV(i).start_blk = 0;
			FDEV(i).end_blk = FDEV(i).start_blk +
				(FDEV(i).total_segments <<
				sbi->log_blocks_per_seg) - 1 +
				le32_to_cpu(raw_super->segment0_blkaddr);

			FDEV(i).start_blk = FDEV(i - 1).end_blk + 1;
			FDEV(i).end_blk = FDEV(i).start_blk +
				(FDEV(i).total_segments <<
				sbi->log_blocks_per_seg) - 1;

		FDEV(i).bdev = blkdev_get_by_path(FDEV(i).path,
					sbi->sb->s_mode, sbi->sb->s_type);

		if (IS_ERR(FDEV(i).bdev))
			return PTR_ERR(FDEV(i).bdev);

		/* to release errored devices */
		sbi->s_ndevs = i + 1;

#ifdef CONFIG_BLK_DEV_ZONED
		if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
				!f2fs_sb_mounted_blkzoned(sbi->sb)) {
			f2fs_msg(sbi->sb, KERN_ERR,
				"Zoned block device feature not enabled\n");

		if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
			if (init_blkz_info(sbi, i)) {
				f2fs_msg(sbi->sb, KERN_ERR,
					"Failed to initialize F2FS blkzone information");

			if (max_devices == 1)

			f2fs_msg(sbi->sb, KERN_INFO,
				"Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
				FDEV(i).total_segments,
				FDEV(i).start_blk, FDEV(i).end_blk,
				bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
				"Host-aware" : "Host-managed");

		f2fs_msg(sbi->sb, KERN_INFO,
			"Mount Device [%2d]: %20s, %8u, %8x - %8x",
			FDEV(i).total_segments,
			FDEV(i).start_blk, FDEV(i).end_blk);

	f2fs_msg(sbi->sb, KERN_INFO,
		"IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct f2fs_sb_info *sbi;
	struct f2fs_super_block *raw_super;
	bool retry = true, need_fsck = false;
	char *options = NULL;
	int recovery, i, valid_super_block;
	struct curseg_info *seg_i;

	valid_super_block = -1;

	/* allocate memory for f2fs-specific super block info */
	sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL);

	/* Load the checksum driver */
	sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(sbi->s_chksum_driver)) {
		f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver.");
		err = PTR_ERR(sbi->s_chksum_driver);
		sbi->s_chksum_driver = NULL;

	/* set a block size */
	if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
		f2fs_msg(sb, KERN_ERR, "unable to set blocksize");

	err = read_raw_super_block(sbi, &raw_super, &valid_super_block,

	sb->s_fs_info = sbi;
	sbi->raw_super = raw_super;

	/*
	 * The BLKZONED feature indicates that the drive was formatted with
	 * zone alignment optimization. This is optional for host-aware
	 * devices, but mandatory for host-managed zoned block devices.
	 */
#ifndef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_mounted_blkzoned(sb)) {
		f2fs_msg(sb, KERN_ERR,
			"Zoned block device support is not enabled\n");

	default_options(sbi);
	/* parse mount options */
	options = kstrdup((const char *)data, GFP_KERNEL);
	if (data && !options) {

	err = parse_options(sb, options);

	sbi->max_file_blocks = max_file_blocks();
	sb->s_maxbytes = sbi->max_file_blocks <<
				le32_to_cpu(raw_super->log_blocksize);
	sb->s_max_links = F2FS_LINK_MAX;
	get_random_bytes(&sbi->s_next_generation, sizeof(u32));

	sb->dq_op = &f2fs_quota_operations;
	sb->s_qcop = &f2fs_quotactl_ops;
	sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;

	sb->s_op = &f2fs_sops;
	sb->s_cop = &f2fs_cryptops;
	sb->s_xattr = f2fs_xattr_handlers;
	sb->s_export_op = &f2fs_export_ops;
	sb->s_magic = F2FS_SUPER_MAGIC;
	sb->s_time_gran = 1;
	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		(test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
	memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));

	/* init f2fs-specific super block info */
	sbi->valid_super_block = valid_super_block;
	mutex_init(&sbi->gc_mutex);
	mutex_init(&sbi->cp_mutex);
	init_rwsem(&sbi->node_write);
	init_rwsem(&sbi->node_change);

	/* disallow all the data/node/meta page writes */
	set_sbi_flag(sbi, SBI_POR_DOING);
	spin_lock_init(&sbi->stat_lock);

	for (i = 0; i < NR_PAGE_TYPE; i++) {
		int n = (i == META) ? 1 : NR_TEMP_TYPE;

		sbi->write_io[i] = kmalloc(n * sizeof(struct f2fs_bio_info),
		if (!sbi->write_io[i]) {

		for (j = HOT; j < n; j++) {
			init_rwsem(&sbi->write_io[i][j].io_rwsem);
			sbi->write_io[i][j].sbi = sbi;
			sbi->write_io[i][j].bio = NULL;
			spin_lock_init(&sbi->write_io[i][j].io_lock);
			INIT_LIST_HEAD(&sbi->write_io[i][j].io_list);
		}
	}

	init_rwsem(&sbi->cp_rwsem);
	init_waitqueue_head(&sbi->cp_wait);

	err = init_percpu_info(sbi);

	if (F2FS_IO_SIZE(sbi) > 1) {
		sbi->write_io_dummy =
			mempool_create_page_pool(2 * (F2FS_IO_SIZE(sbi) - 1), 0);
		if (!sbi->write_io_dummy) {

	/* get an inode for meta space */
	sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
	if (IS_ERR(sbi->meta_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode");
		err = PTR_ERR(sbi->meta_inode);

	err = get_valid_checkpoint(sbi);
		f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint");
		goto free_meta_inode;

	/* Initialize device list */
	err = f2fs_scan_devices(sbi);
		f2fs_msg(sb, KERN_ERR, "Failed to find devices");

	sbi->total_valid_node_count =
			le32_to_cpu(sbi->ckpt->valid_node_count);
	percpu_counter_set(&sbi->total_valid_inode_count,
			le32_to_cpu(sbi->ckpt->valid_inode_count));
	sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count);
	sbi->total_valid_block_count =
			le64_to_cpu(sbi->ckpt->valid_block_count);
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	sbi->reserved_blocks = 0;

	for (i = 0; i < NR_INODE_TYPE; i++) {
		INIT_LIST_HEAD(&sbi->inode_list[i]);
		spin_lock_init(&sbi->inode_lock[i]);
	}

	init_extent_cache_info(sbi);

	init_ino_entry_info(sbi);

	/* setup f2fs internal modules */
	err = build_segment_manager(sbi);
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS segment manager");
	err = build_node_manager(sbi);
		f2fs_msg(sb, KERN_ERR,
			"Failed to initialize F2FS node manager");

	/* For write statistics */
	if (sb->s_bdev->bd_part)
		sbi->sectors_written_start =
			(u64)part_stat_read(sb->s_bdev->bd_part, sectors[1]);

	/* Read accumulated write IO statistics if they exist */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	if (__exist_node_summaries(sbi))
		sbi->kbytes_written =
			le64_to_cpu(seg_i->journal->info.kbytes_written);

	build_gc_manager(sbi);

	/* get an inode for node space */
	sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
	if (IS_ERR(sbi->node_inode)) {
		f2fs_msg(sb, KERN_ERR, "Failed to read node inode");
		err = PTR_ERR(sbi->node_inode);

	f2fs_join_shrinker(sbi);

	err = f2fs_build_stats(sbi);

	/* if there are any orphan nodes, free them */
	err = recover_orphan_inodes(sbi);
		goto free_node_inode;

	/* read root inode and dentry */
	root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
		f2fs_msg(sb, KERN_ERR, "Failed to read root inode");
		err = PTR_ERR(root);
		goto free_node_inode;
	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
		goto free_node_inode;

	sb->s_root = d_make_root(root); /* allocate root dentry */
		goto free_root_inode;

	err = f2fs_init_sysfs(sbi);
		goto free_root_inode;

	/* recover fsynced data */
	if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) {
		/*
		 * mount should fail when the device is read-only and the
		 * previous checkpoint was not done by a clean system shutdown.
		 */
		if (bdev_read_only(sb->s_bdev) &&
				!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {

			set_sbi_flag(sbi, SBI_NEED_FSCK);

		err = recover_fsync_data(sbi, false);
			f2fs_msg(sb, KERN_ERR,
				"Cannot recover all fsync data errno=%d", err);
		err = recover_fsync_data(sbi, true);

		if (!f2fs_readonly(sb) && err > 0) {
			f2fs_msg(sb, KERN_ERR,
				"Need to recover fsync data");

	/* recover_fsync_data() cleared this already */
	clear_sbi_flag(sbi, SBI_POR_DOING);

	/*
	 * If the filesystem is not mounted read-only,
	 * then start the gc_thread.
	 */
	if (test_opt(sbi, BG_GC) && !f2fs_readonly(sb)) {
		/* After POR, we can run the background GC thread. */
		err = start_gc_thread(sbi);

	/* recover broken superblock */
		err = f2fs_commit_super(sbi, true);
		f2fs_msg(sb, KERN_INFO,
			"Try to recover %dth superblock, ret: %d",
			sbi->valid_super_block ? 1 : 2, err);

	f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx",
				cur_cp_version(F2FS_CKPT(sbi)));
	f2fs_update_time(sbi, CP_TIME);
	f2fs_update_time(sbi, REQ_TIME);

	f2fs_sync_inode_meta(sbi);
	f2fs_exit_sysfs(sbi);

	truncate_inode_pages_final(NODE_MAPPING(sbi));
	mutex_lock(&sbi->umount_mutex);
	release_ino_entry(sbi, true);
	f2fs_leave_shrinker(sbi);
	/*
	 * Some dirty meta pages can be produced by recover_orphan_inodes()
	 * failing with EIO. Then, iput(node_inode) can trigger balance_fs_bg()
	 * followed by write_checkpoint() through f2fs_write_node_pages(), which
	 * falls into an infinite loop in sync_meta_pages().
	 */
	truncate_inode_pages_final(META_MAPPING(sbi));
	iput(sbi->node_inode);
	mutex_unlock(&sbi->umount_mutex);
	f2fs_destroy_stats(sbi);

	destroy_node_manager(sbi);

	destroy_segment_manager(sbi);

	destroy_device_list(sbi);

	make_bad_inode(sbi->meta_inode);
	iput(sbi->meta_inode);

	mempool_destroy(sbi->write_io_dummy);

	for (i = 0; i < NR_PAGE_TYPE; i++)
		kfree(sbi->write_io[i]);
	destroy_percpu_info(sbi);

	if (sbi->s_chksum_driver)
		crypto_free_shash(sbi->s_chksum_driver);

	/* give it only one more chance */
	shrink_dcache_sb(sb);
static struct dentry *f2fs_mount(struct file_system_type *fs_type, int flags,
			const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, f2fs_fill_super);
}

static void kill_f2fs_super(struct super_block *sb)
{
	set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
	stop_gc_thread(F2FS_SB(sb));
	stop_discard_thread(F2FS_SB(sb));
	kill_block_super(sb);
}
static struct file_system_type f2fs_fs_type = {
	.owner		= THIS_MODULE,
	.mount		= f2fs_mount,
	.kill_sb	= kill_f2fs_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("f2fs");
static int __init init_inodecache(void)
{
	f2fs_inode_cachep = kmem_cache_create("f2fs_inode_cache",
			sizeof(struct f2fs_inode_info), 0,
			SLAB_RECLAIM_ACCOUNT|SLAB_ACCOUNT, NULL);
	if (!f2fs_inode_cachep)
		return -ENOMEM;
	return 0;
}

static void destroy_inodecache(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy the cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(f2fs_inode_cachep);
}
static int __init init_f2fs_fs(void)
{
	f2fs_build_trace_ios();

	err = init_inodecache();

	err = create_node_manager_caches();
	if (err)
		goto free_inodecache;
	err = create_segment_manager_caches();
	if (err)
		goto free_node_manager_caches;
	err = create_checkpoint_caches();
	if (err)
		goto free_segment_manager_caches;
	err = create_extent_cache();
	if (err)
		goto free_checkpoint_caches;
	err = f2fs_register_sysfs();
	if (err)
		goto free_extent_cache;
	err = register_shrinker(&f2fs_shrinker_info);

	err = register_filesystem(&f2fs_fs_type);

	err = f2fs_create_root_stats();
	if (err)
		goto free_filesystem;

	unregister_filesystem(&f2fs_fs_type);

	unregister_shrinker(&f2fs_shrinker_info);

	f2fs_unregister_sysfs();

	destroy_extent_cache();
free_checkpoint_caches:
	destroy_checkpoint_caches();
free_segment_manager_caches:
	destroy_segment_manager_caches();
free_node_manager_caches:
	destroy_node_manager_caches();

	destroy_inodecache();
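/*
 * The labels above form the usual kernel-style unwind ladder: each
 * successful setup step is paired with a teardown label, and a failure
 * at step N jumps to the label that undoes steps N-1..1 in reverse
 * order, so a partially initialized module never leaves slab caches,
 * sysfs entries or the shrinker registered behind it.
 */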
static void __exit exit_f2fs_fs(void)
{
	f2fs_destroy_root_stats();
	unregister_filesystem(&f2fs_fs_type);
	unregister_shrinker(&f2fs_shrinker_info);
	f2fs_unregister_sysfs();
	destroy_extent_cache();
	destroy_checkpoint_caches();
	destroy_segment_manager_caches();
	destroy_node_manager_caches();
	destroy_inodecache();
	f2fs_destroy_trace_ios();
}

module_init(init_f2fs_fs)
module_exit(exit_f2fs_fs)

MODULE_AUTHOR("Samsung Electronics's Praesto Team");
MODULE_DESCRIPTION("Flash Friendly File System");
MODULE_LICENSE("GPL");