// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
static struct kmem_cache *fsync_entry_slab;
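
/*
 * Check that the blocks allocated since the last checkpoint still fit
 * within the user-visible block count, i.e. that roll-forward recovery
 * has enough space to replay them on top of the checkpointed state.
 */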
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}
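
/* Look up an entry in the fsync inode list by inode number. */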
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
								nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
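
/*
 * Fetch the inode behind a fsynced node block and add it to the recovery
 * list.  For an inode that was re-created from a node page (quota_inode),
 * its quota usage must be charged here as well.
 */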
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}
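
/* Remove an entry from the recovery list and release its inode. */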
static void del_fsync_inode(struct fsync_inode_entry *entry, int drop)
{
	if (drop) {
		/* inode should not be recovered, drop it */
		f2fs_inode_synced(entry->inode);
	}
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
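
/*
 * Rebuild an f2fs_filename from the raw on-disk inode so that the dentry
 * can be looked up during recovery, covering the encrypted and casefolded
 * directory cases where the hash is read from disk or recomputed.
 */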
static int init_recovered_filename(const struct inode *dir,
				   struct f2fs_inode *raw_inode,
				   struct f2fs_filename *fname,
				   struct qstr *usr_fname)
{
	int err;

	memset(fname, 0, sizeof(*fname));
	fname->disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname->disk_name.name = raw_inode->i_name;

	if (WARN_ON(fname->disk_name.len > F2FS_NAME_LEN))
		return -ENAMETOOLONG;

	if (!IS_ENCRYPTED(dir)) {
		usr_fname->name = fname->disk_name.name;
		usr_fname->len = fname->disk_name.len;
		fname->usr_fname = usr_fname;
	}

	/* Compute the hash of the filename */
	if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) {
		/*
		 * In this case the hash isn't computable without the key, so
		 * it was saved on-disk.
		 */
		if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN)
			return -EINVAL;
		fname->hash = get_unaligned((f2fs_hash_t *)
				&raw_inode->i_name[fname->disk_name.len]);
	} else if (IS_CASEFOLDED(dir)) {
		err = f2fs_init_casefolded_name(dir, fname);
		if (err)
			return err;
		f2fs_hash_filename(dir, fname);
#ifdef CONFIG_UNICODE
		/* Case-sensitive match is fine for recovery */
		kfree(fname->cf_name.name);
		fname->cf_name.name = NULL;
#endif
	} else {
		f2fs_hash_filename(dir, fname);
	}
	return 0;
}
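
/*
 * Re-link the recovered inode into its parent directory: a stale entry
 * pointing at a different inode is unlinked through the orphan list, and
 * a fresh dentry is added when none matches.
 */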
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct f2fs_filename fname;
	struct qstr usr_fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;
	err = init_recovered_filename(dir, raw_inode, &fname, &usr_fname);
	if (err)
		goto out;
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
		    __func__, ino_of_node(ipage), name,
		    IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
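
/*
 * Transfer quota charges when the recovered uid/gid differ from the ones
 * currently attached to the in-memory inode.
 */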
static int recover_quota_data(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	struct iattr attr;
	uid_t i_uid = le32_to_cpu(raw->i_uid);
	gid_t i_gid = le32_to_cpu(raw->i_gid);
	int err;

	memset(&attr, 0, sizeof(attr));

	attr.ia_uid = make_kuid(inode->i_sb->s_user_ns, i_uid);
	attr.ia_gid = make_kgid(inode->i_sb->s_user_ns, i_gid);

	if (!uid_eq(attr.ia_uid, inode->i_uid))
		attr.ia_valid |= ATTR_UID;
	if (!gid_eq(attr.ia_gid, inode->i_gid))
		attr.ia_valid |= ATTR_GID;

	if (!attr.ia_valid)
		return 0;

	err = dquot_transfer(inode, &attr);
	if (err)
		set_sbi_flag(F2FS_I_SB(inode), SBI_QUOTA_NEED_REPAIR);
	return err;
}
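
/* Mirror the on-disk pin-file and data-exist hints into the inode flags. */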
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}
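
/*
 * Bring the in-memory inode up to date with the fsynced on-disk copy:
 * mode, ownership, (project) quota, size, timestamps and inline flags.
 */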
static int recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;
	int err;

	inode->i_mode = le16_to_cpu(raw->i_mode);

	err = recover_quota_data(inode, page);
	if (err)
		return err;

	i_uid_write(inode, le32_to_cpu(raw->i_uid));
	i_gid_write(inode, le32_to_cpu(raw->i_gid));

	if (raw->i_inline & F2FS_EXTRA_ATTR) {
		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(raw, le16_to_cpu(raw->i_extra_isize),
								i_projid)) {
			projid_t i_projid;
			kprojid_t kprojid;

			i_projid = (projid_t)le32_to_cpu(raw->i_projid);
			kprojid = make_kprojid(&init_user_ns, i_projid);

			if (!projid_eq(kprojid, F2FS_I(inode)->i_projid)) {
				err = f2fs_transfer_project_quota(inode,
								kprojid);
				if (err)
					return err;
				F2FS_I(inode)->i_projid = kprojid;
			}
		}
	}

	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;
	F2FS_I(inode)->i_flags = le32_to_cpu(raw->i_flags);
	f2fs_set_inode_flags(inode);
	F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] =
					le16_to_cpu(raw->i_gc_failures);

	recover_inline_flags(inode, raw);

	f2fs_mark_inode_dirty_sync(inode, true);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
		    ino_of_node(page), name, raw->i_inline);
	return 0;
}
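
/*
 * Walk the warm node chain that starts right after the last checkpoint
 * and collect every inode that has a fsynced dnode, so that recovery
 * knows which files need to be replayed.
 */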
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err) {
					f2fs_put_page(page, 1);
					break;
				}
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				f2fs_put_page(page, 1);
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
				    __func__, blkaddr,
				    next_blkaddr_of_node(page));
			f2fs_put_page(page, 1);
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	return err;
}
static void destroy_fsync_dnodes(struct list_head *head, int drop)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry, drop);
}
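
/*
 * The destination block of a replay may still be referenced by an older
 * node: find that previous owner through the segment summary and truncate
 * its stale index so the block can be reused safely.
 */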
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	if (IS_ERR(sum_page))
		return PTR_ERR(sum_page);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (f2fs_data_blkaddr(&tdn) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
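
/*
 * Replay one fsynced node block onto the given inode: xattrs first, then
 * inline data, then every data block index covered by the node.
 */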
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		err = f2fs_recover_inline_xattr(inode, page);
		if (err)
			goto out;
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)
			err = 0;
		goto out;
	}

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));

	if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
		f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
			  inode->i_ino, ofs_of_node(dn.node_page),
			  ofs_of_node(page));
		err = -EFSCORRUPTED;
		goto err;
	}

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = f2fs_data_blkaddr(&dn);
		dest = data_blkaddr(dn.inode, page, dn.ofs_in_node);

		if (__is_valid_data_blkaddr(src) &&
			!f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		if (__is_valid_data_blkaddr(dest) &&
			!f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			err = -EFSCORRUPTED;
			goto err;
		}

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
				       IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		    inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
		    recovered, err);
	return err;
}
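
/*
 * Second pass over the node chain: for each collected inode, replay the
 * inode metadata, directory entry and data blocks recorded since the
 * last checkpoint.
 */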
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
		struct list_head *tmp_inode_list, struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page)) {
			err = recover_inode(entry->inode, page);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			list_move_tail(&entry->list, tmp_inode_list);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}
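
/*
 * Entry point of roll-forward recovery, called at mount time.  With
 * check_only it merely reports whether there is anything to recover;
 * otherwise it replays all fsynced updates and writes a checkpoint.
 */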
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list, tmp_inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
	bool fix_curseg_write_pointer = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
					sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&tmp_inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	down_write(&sbi->cp_global_sem);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
	else {
		/* restore s_flags to let iput() trash data */
		sbi->sb->s_flags = s_flags;
	}
skip:
	fix_curseg_write_pointer = !check_only || list_empty(&inode_list);

	destroy_fsync_dnodes(&inode_list, err);
	destroy_fsync_dnodes(&tmp_inode_list, err);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	/*
	 * If fsync data succeeds or there is no fsync data to recover,
	 * and the f2fs is not read only, check and fix zoned block devices'
	 * write pointer consistency.
	 */
	if (!err && fix_curseg_write_pointer && !f2fs_readonly(sbi->sb) &&
			f2fs_sb_has_blkzoned(sbi)) {
		err = f2fs_fix_curseg_write_pointer(sbi);
		ret = err;
	}

	if (!err)
		clear_sbi_flag(sbi, SBI_POR_DOING);

	up_write(&sbi->cp_global_sem);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list, err);

	if (need_writecp) {
		set_sbi_flag(sbi, SBI_IS_RECOVERED);

		if (!err) {
			struct cp_control cpc = {
				.reason = CP_RECOVERY,
			};
			err = f2fs_write_checkpoint(sbi, &cpc);
		}
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}