/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
/*
 * Roll forward recovery scenarios.
 *
 * [Term] F: fsync_mark, D: dentry_mark
 *
 * 1. inode(x) | CP | inode(x) | dnode(F)
 * -> Update the latest inode(x).
 *
 * 2. inode(x) | CP | inode(F) | dnode(F)
 * -> No problem.
 *
 * 3. inode(x) | CP | dnode(F) | inode(x)
 * -> Recover to the latest dnode(F), and drop the last inode(x)
 *
 * 4. inode(x) | CP | dnode(F) | inode(F)
 * -> No problem.
 *
 * 5. CP | inode(x) | dnode(F)
 * -> The inode(DF) was missing. Should drop this dnode(F).
 *
 * 6. CP | inode(DF) | dnode(F)
 * -> No problem.
 *
 * 7. CP | dnode(F) | inode(DF)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *
 * 8. CP | dnode(F) | inode(x)
 * -> If f2fs_iget fails, then goto next to find inode(DF).
 *    But it will fail due to no inode(DF).
 */
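
/*
 * Both passes below start at the block following the warm node curseg
 * recorded in the last checkpoint and follow next_blkaddr_of_node():
 * find_fsync_dnodes() collects the fsync-marked inodes, recover_data()
 * replays their dnodes, and f2fs_recover_fsync_data() drives both.
 */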
static struct kmem_cache *fsync_entry_slab;
bool f2fs_space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count);

	if (sbi->last_valid_block_count + nalloc > sbi->user_block_count)
		return false;
	return true;
}
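
/* Look up an entry in the fsync inode list by inode number. */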
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
							nid_t ino)
{
	struct fsync_inode_entry *entry;

	list_for_each_entry(entry, head, list)
		if (entry->inode->i_ino == ino)
			return entry;

	return NULL;
}
static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
			struct list_head *head, nid_t ino, bool quota_inode)
{
	struct inode *inode;
	struct fsync_inode_entry *entry;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	err = dquot_initialize(inode);
	if (err)
		goto err_out;

	if (quota_inode) {
		err = dquot_alloc_inode(inode);
		if (err)
			goto err_out;
	}

	entry = f2fs_kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO);
	entry->inode = inode;
	list_add_tail(&entry->list, head);

	return entry;
err_out:
	iput(inode);
	return ERR_PTR(err);
}
static void del_fsync_inode(struct fsync_inode_entry *entry)
{
	iput(entry->inode);
	list_del(&entry->list);
	kmem_cache_free(fsync_entry_slab, entry);
}
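
/*
 * Re-link a recovered inode into its parent directory: look the parent up
 * via i_pino, delete any stale dentry that now points at a different inode,
 * and add the dentry back if it is missing.
 */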
static int recover_dentry(struct inode *inode, struct page *ipage,
						struct list_head *dir_list)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct fscrypt_name fname;
	struct page *page;
	struct inode *dir, *einode;
	struct fsync_inode_entry *entry;
	int err = 0;
	char *name;

	entry = get_fsync_inode(dir_list, pino);
	if (!entry) {
		entry = add_fsync_inode(F2FS_I_SB(inode), dir_list,
							pino, false);
		if (IS_ERR(entry)) {
			dir = ERR_CAST(entry);
			err = PTR_ERR(entry);
			goto out;
		}
	}

	dir = entry->inode;

	memset(&fname, 0, sizeof(struct fscrypt_name));
	fname.disk_name.len = le32_to_cpu(raw_inode->i_namelen);
	fname.disk_name.name = raw_inode->i_name;

	if (unlikely(fname.disk_name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = __f2fs_find_entry(dir, &fname, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_put;

	if (de) {
		einode = f2fs_iget_retry(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			err = PTR_ERR(einode);
			if (err == -ENOENT)
				err = -EEXIST;
			goto out_put;
		}

		err = dquot_initialize(einode);
		if (err) {
			iput(einode);
			goto out_put;
		}

		err = f2fs_acquire_orphan_inode(F2FS_I_SB(inode));
		if (err) {
			iput(einode);
			goto out_put;
		}
		f2fs_delete_entry(de, page, dir, einode);
		iput(einode);
		goto retry;
	} else if (IS_ERR(page)) {
		err = PTR_ERR(page);
	} else {
		err = f2fs_add_dentry(dir, &fname, inode,
					inode->i_ino, inode->i_mode);
	}
	if (err == -ENOMEM)
		goto retry;
	goto out;

out_put:
	f2fs_put_page(page, 0);
out:
	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = raw_inode->i_name;
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}
static void recover_inline_flags(struct inode *inode, struct f2fs_inode *ri)
{
	if (ri->i_inline & F2FS_PIN_FILE)
		set_inode_flag(inode, FI_PIN_FILE);
	else
		clear_inode_flag(inode, FI_PIN_FILE);
	if (ri->i_inline & F2FS_DATA_EXIST)
		set_inode_flag(inode, FI_DATA_EXIST);
	else
		clear_inode_flag(inode, FI_DATA_EXIST);
}
static void recover_inode(struct inode *inode, struct page *page)
{
	struct f2fs_inode *raw = F2FS_INODE(page);
	char *name;

	inode->i_mode = le16_to_cpu(raw->i_mode);
	f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw->i_mtime_nsec);

	F2FS_I(inode)->i_advise = raw->i_advise;

	recover_inline_flags(inode, raw);

	if (file_enc_name(inode))
		name = "<encrypted>";
	else
		name = F2FS_INODE(page)->i_name;

	f2fs_msg(inode->i_sb, KERN_NOTICE,
		"recover_inode: ino = %x, name = %s, inline = %x",
			ino_of_node(page), name, raw->i_inline);
}
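
/*
 * First pass: follow the node chain written after the last checkpoint and
 * build the list of inodes that own fsync-marked dnodes, stopping at the
 * first non-recoverable node or on a detected loop.
 */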
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head,
				bool check_only)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	block_t blkaddr;
	unsigned int loop_cnt = 0;
	unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
						valid_user_blocks(sbi);
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			return 0;

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry) {
			bool quota_inode = false;

			if (!check_only &&
					IS_INODE(page) && is_dent_dnode(page)) {
				err = f2fs_recover_inode_page(sbi, page);
				if (err)
					break;
				quota_inode = true;
			}

			/*
			 * CP | dnode(F) | inode(DF)
			 * For this case, we should not give up now.
			 */
			entry = add_fsync_inode(sbi, head, ino_of_node(page),
								quota_inode);
			if (IS_ERR(entry)) {
				err = PTR_ERR(entry);
				if (err == -ENOENT) {
					err = 0;
					goto next;
				}
				break;
			}
		}
		entry->blkaddr = blkaddr;

		if (IS_INODE(page) && is_dent_dnode(page))
			entry->last_dentry = blkaddr;
next:
		/* sanity check in order to detect looped node chain */
		if (++loop_cnt >= free_blocks ||
			blkaddr == next_blkaddr_of_node(page)) {
			f2fs_msg(sbi->sb, KERN_NOTICE,
				"%s: detect looped node chain, "
				"blkaddr:%u, next:%u",
				__func__, blkaddr, next_blkaddr_of_node(page));
			err = -EINVAL;
			break;
		}

		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);

		f2fs_ra_meta_pages_cond(sbi, blkaddr);
	}
	f2fs_put_page(page, 1);
	return err;
}
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list)
		del_fsync_inode(entry);
}
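
/*
 * The destination block may still be indexed by a node page written before
 * the crash. Find that previous owner through the segment summary and
 * truncate its stale index so the block can be reused for recovery.
 */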
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	struct dnode_of_data tdn = *dn;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = f2fs_get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		tdn.nid = nid;
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	} else if (dn->nid == nid) {
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		goto truncate_out;
	}

	/* Get the node page */
	node_page = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	if (ino != dn->inode->i_ino) {
		int ret;

		/* Deallocate previous index in the node page */
		inode = f2fs_iget_retry(sbi->sb, ino);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		ret = dquot_initialize(inode);
		if (ret) {
			iput(inode);
			return ret;
		}
	} else {
		inode = dn->inode;
	}

	bidx = f2fs_start_bidx_of_node(offset, inode) +
				le16_to_cpu(sum.ofs_in_node);

	/*
	 * if inode page is locked, unlock temporarily, but its reference
	 * count keeps alive.
	 */
	if (ino == dn->inode->i_ino && dn->inode_page_locked)
		unlock_page(dn->inode_page);

	set_new_dnode(&tdn, inode, NULL, NULL, 0);
	if (f2fs_get_dnode_of_data(&tdn, bidx, LOOKUP_NODE))
		goto out;

	if (tdn.data_blkaddr == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);

	f2fs_put_dnode(&tdn);
out:
	if (ino != dn->inode->i_ino)
		iput(inode);
	else if (dn->inode_page_locked)
		lock_page(dn->inode_page);
	return 0;

truncate_out:
	if (datablock_addr(tdn.inode, tdn.node_page,
					tdn.ofs_in_node) == blkaddr)
		f2fs_truncate_data_blocks_range(&tdn, 1);
	if (dn->inode->i_ino == nid && !dn->inode_page_locked)
		unlock_page(dn->inode_page);
	return 0;
}
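
/*
 * Replay one fsynced dnode page: recover xattr and inline data first, then
 * walk every data index and make the current dnode match the block
 * addresses recorded at fsync time.
 */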
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page)
{
	struct dnode_of_data dn;
	struct node_info ni;
	unsigned int start, end;
	int err = 0, recovered = 0;

	/* step 1: recover xattr */
	if (IS_INODE(page)) {
		f2fs_recover_inline_xattr(inode, page);
	} else if (f2fs_has_xattr_block(ofs_of_node(page))) {
		err = f2fs_recover_xattr_data(inode, page);
		if (!err)
			recovered++;
		goto out;
	}

	/* step 2: recover inline data */
	if (f2fs_recover_inline_data(inode, page))
		goto out;

	/* step 3: recover data indices */
	start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
	end = start + ADDRS_PER_PAGE(page, inode);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
retry_dn:
	err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		if (err == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry_dn;
		}
		goto out;
	}

	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	err = f2fs_get_node_info(sbi, dn.nid, &ni);
	if (err)
		goto err;

	f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
	f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++, dn.ofs_in_node++) {
		block_t src, dest;

		src = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(dn.inode, page, dn.ofs_in_node);

		/* skip recovering if dest is the same as src */
		if (src == dest)
			continue;

		/* dest is invalid, just invalidate src block */
		if (dest == NULL_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			continue;
		}

		if (!file_keep_isize(inode) &&
			(i_size_read(inode) <= ((loff_t)start << PAGE_SHIFT)))
			f2fs_i_size_write(inode,
				(loff_t)(start + 1) << PAGE_SHIFT);

		/*
		 * dest is reserved block, invalidate src block
		 * and then reserve one new block in dnode page.
		 */
		if (dest == NEW_ADDR) {
			f2fs_truncate_data_blocks_range(&dn, 1);
			f2fs_reserve_new_block(&dn);
			continue;
		}

		/* dest is valid block, try to recover from src to dest */
		if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {

			if (src == NULL_ADDR) {
				err = f2fs_reserve_new_block(&dn);
				while (err &&
					IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
					err = f2fs_reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(sbi, err);
				if (err)
					goto err;
			}
retry_prev:
			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC, HZ/50);
					goto retry_prev;
				}
				goto err;
			}

			/* write dummy data page */
			f2fs_replace_block(sbi, &dn, src, dest,
						ni.version, false, false);
			recovered++;
		}
	}

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);
err:
	f2fs_put_dnode(&dn);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
		inode->i_ino,
		file_keep_isize(inode) ? "keep" : "recover",
		recovered, err);
	return err;
}
static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list,
						struct list_head *dir_list)
{
	struct curseg_info *curseg;
	struct page *page = NULL;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	while (1) {
		struct fsync_inode_entry *entry;

		if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
			break;

		f2fs_ra_meta_pages_cond(sbi, blkaddr);

		page = f2fs_get_tmp_page(sbi, blkaddr);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			break;
		}

		if (!is_recoverable_dnode(page)) {
			f2fs_put_page(page, 1);
			break;
		}

		entry = get_fsync_inode(inode_list, ino_of_node(page));
		if (!entry)
			goto next;
		/*
		 * inode(x) | CP | inode(x) | dnode(F)
		 * In this case, we can lose the latest inode(x).
		 * So, call recover_inode for the inode update.
		 */
		if (IS_INODE(page))
			recover_inode(entry->inode, page);
		if (entry->last_dentry == blkaddr) {
			err = recover_dentry(entry->inode, page, dir_list);
			if (err) {
				f2fs_put_page(page, 1);
				break;
			}
		}
		err = do_recover_data(sbi, entry->inode, page);
		if (err) {
			f2fs_put_page(page, 1);
			break;
		}

		if (entry->blkaddr == blkaddr)
			del_fsync_inode(entry);
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
		f2fs_put_page(page, 1);
	}
	if (!err)
		f2fs_allocate_new_segments(sbi);
	return err;
}
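
/*
 * Entry point, called at mount time. With check_only set, it only reports
 * whether there is anything to recover (returns 1 if so); otherwise it
 * performs the recovery and writes a CP_RECOVERY checkpoint on success.
 */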
int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
{
	struct list_head inode_list;
	struct list_head dir_list;
	int err;
	int ret = 0;
	unsigned long s_flags = sbi->sb->s_flags;
	bool need_writecp = false;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO,
				"recover fsync data on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;
	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry));
	if (!fsync_entry_slab) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&inode_list);
	INIT_LIST_HEAD(&dir_list);

	/* prevent checkpoint */
	mutex_lock(&sbi->cp_mutex);

	/* step #1: find fsynced inode numbers */
	err = find_fsync_dnodes(sbi, &inode_list, check_only);
	if (err || list_empty(&inode_list))
		goto skip;

	if (check_only) {
		ret = 1;
		goto skip;
	}

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, &dir_list);
	if (!err)
		f2fs_bug_on(sbi, !list_empty(&inode_list));
skip:
	destroy_fsync_dnodes(&inode_list);

	/* truncate meta pages to be used by the recovery */
	truncate_inode_pages_range(META_MAPPING(sbi),
			(loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1);

	if (err) {
		truncate_inode_pages_final(NODE_MAPPING(sbi));
		truncate_inode_pages_final(META_MAPPING(sbi));
	}

	clear_sbi_flag(sbi, SBI_POR_DOING);
	mutex_unlock(&sbi->cp_mutex);

	/* let's drop all the directory inodes for clean checkpoint */
	destroy_fsync_dnodes(&dir_list);

	if (!err && need_writecp) {
		struct cp_control cpc = {
			.reason = CP_RECOVERY,
		};
		err = f2fs_write_checkpoint(sbi, &cpc);
	}

	kmem_cache_destroy(fsync_entry_slab);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return ret ? ret : err;
}