// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
#include <linux/kthread.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

#define DEFAULT_CHECKPOINT_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
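/*
 * Note: IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3) packs the best-effort
 * scheduling class with priority level 3 (0 is highest, 7 lowest within
 * the class), i.e. roughly (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 3.
 * This is only the default for the checkpoint kthread; it is tunable at
 * runtime through the ckpt_thread_ioprio sysfs knob.
 */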
static struct kmem_cache *ino_entry_slab;
struct kmem_cache *f2fs_inode_entry_slab;

void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
	f2fs_build_fault_attr(sbi, 0, 0);
	set_ckpt_flags(sbi, CP_ERROR_FLAG);
	if (!end_io)
		f2fs_flush_merged_writes(sbi);
}

/*
 * We guarantee no failure on the returned page.
 */
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	f2fs_wait_on_page_writeback(page, META, true, true);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	return page;
}

static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
							bool is_meta)
{
	struct address_space *mapping = META_MAPPING(sbi);
	struct page *page;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = REQ_META | REQ_PRIO,
		.old_blkaddr = index,
		.new_blkaddr = index,
		.encrypted_page = NULL,
		.is_por = !is_meta,
	};
	int err;

	if (unlikely(!is_meta))
		fio.op_flags &= ~REQ_META;
repeat:
	page = f2fs_grab_cache_page(mapping, index, false);
	if (!page) {
		cond_resched();
		goto repeat;
	}
	if (PageUptodate(page))
		goto out;

	fio.page = page;

	err = f2fs_submit_page_bio(&fio);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}

	f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
out:
	return page;
}

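/*
 * The wrappers below differ only in how the target block is treated:
 * f2fs_get_meta_page() reads inside the regular metadata area and returns
 * an ERR_PTR on failure; f2fs_get_meta_page_retry() additionally retries
 * -EIO a bounded number of times before declaring the checkpoint broken;
 * f2fs_get_tmp_page() (is_meta == false) is used during power-on recovery
 * to read blocks beyond the metadata area, so the REQ_META hint is dropped.
 */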
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}

struct page *f2fs_get_meta_page_retry(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	int count = 0;

retry:
	page = __get_meta_page(sbi, index, true);
	if (IS_ERR(page)) {
		if (PTR_ERR(page) == -EIO &&
				++count <= DEFAULT_RETRY_IO_COUNT)
			goto retry;
		f2fs_stop_checkpoint(sbi, false);
	}
	return page;
}

/* for POR only */
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}

static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
					int type)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	bool exist;

	if (type != DATA_GENERIC_ENHANCE && type != DATA_GENERIC_ENHANCE_READ)
		return true;

	segno = GET_SEGNO(sbi, blkaddr);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);

	exist = f2fs_test_bit(offset, se->cur_valid_map);
	if (!exist && type == DATA_GENERIC_ENHANCE) {
		f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
			 blkaddr, exist);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}
	return exist;
}

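/*
 * Rough sketch of the on-disk layout these bounds checks encode, in
 * block-address order: superblock | checkpoint area | SIT | NAT | SSA |
 * main area. Each META_* case below rejects a block address outside the
 * region the caller claims to access; DATA_GENERIC_ENHANCE* additionally
 * cross-checks the address against the SIT validity bitmap above.
 */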
bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	switch (type) {
	case META_NAT:
		break;
	case META_SIT:
		if (unlikely(blkaddr >= SIT_BLK_CNT(sbi)))
			return false;
		break;
	case META_SSA:
		if (unlikely(blkaddr >= MAIN_BLKADDR(sbi) ||
			blkaddr < SM_I(sbi)->ssa_blkaddr))
			return false;
		break;
	case META_CP:
		if (unlikely(blkaddr >= SIT_I(sbi)->sit_base_addr ||
			blkaddr < __start_cp_addr(sbi)))
			return false;
		break;
	case META_POR:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
			blkaddr < MAIN_BLKADDR(sbi)))
			return false;
		break;
	case DATA_GENERIC:
	case DATA_GENERIC_ENHANCE:
	case DATA_GENERIC_ENHANCE_READ:
		if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
				blkaddr < MAIN_BLKADDR(sbi))) {
			f2fs_warn(sbi, "access invalid blkaddr:%u",
				  blkaddr);
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			return false;
		} else {
			return __is_bitmap_valid(sbi, blkaddr, type);
		}
		break;
	case META_GENERIC:
		if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
			blkaddr >= MAIN_BLKADDR(sbi)))
			return false;
		break;
	default:
		BUG();
	}

	return true;
}

/*
 * Readahead CP/NAT/SIT/SSA/POR pages
 */
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
		.in_list = false,
		.is_por = (type == META_POR),
	};
	struct blk_plug plug;
	int err;

	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {

		if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			if (unlikely(blkno >= TOTAL_SEGS(sbi)))
				goto out;
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		err = f2fs_submit_page_bio(&fio);
		f2fs_put_page(page, err ? 1 : 0);

		if (!err)
			f2fs_update_iostat(sbi, FS_META_READ_IO, F2FS_BLKSIZE);
	}
out:
	blk_finish_plug(&plug);
	return blkno - start;
}

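/*
 * Typical use, as in orphan recovery below:
 *	f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
 * populates the meta page cache ahead of strictly sequential consumption.
 * Note the return value counts the blocks walked, not the reads actually
 * issued (already-uptodate pages are skipped).
 */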
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index)
{
	struct page *page;
	bool readahead = false;

	page = find_get_page(META_MAPPING(sbi), index);
	if (!page || !PageUptodate(page))
		readahead = true;
	f2fs_put_page(page, 0);

	if (readahead)
		f2fs_ra_meta_pages(sbi, index, BIO_MAX_VECS, META_POR, true);
}

static int __f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;

	f2fs_do_write_meta_page(sbi, page, io_type);
	dec_page_count(sbi, F2FS_DIRTY_META);

	if (wbc->for_reclaim)
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, META);

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_write(sbi, META);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	return __f2fs_write_meta_page(page, wbc, FS_META_IO);
}

static int f2fs_write_meta_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	long diff, written;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* collect a number of dirty meta pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_META) <
					nr_pages_to_skip(sbi, META))
		goto skip_write;

	/* if locking failed, cp will flush dirty pages instead */
	if (!down_write_trylock(&sbi->cp_global_sem))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, META);
	diff = nr_pages_to_write(sbi, META, wbc);
	written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO);
	up_write(&sbi->cp_global_sem);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_META);
	trace_f2fs_writepages(mapping->host, wbc, META);
	return 0;
}

long f2fs_sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
				long nr_to_write, enum iostat_type io_type)
{
	struct address_space *mapping = META_MAPPING(sbi);
	pgoff_t index = 0, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	int nr_pages;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec);

	blk_start_plug(&plug);

	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == ULONG_MAX)
				prev = page->index - 1;
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_write(sbi, type);

	blk_finish_plug(&plug);

	return nwritten;
}

static int f2fs_set_meta_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, META);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
		set_page_private_reference(page);
		return 1;
	}
	return 0;
}

const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};

static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
						unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e, *tmp;

	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = tmp;
		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
			f2fs_bug_on(sbi, 1);

		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		if (type != ORPHAN_INO)
			im->ino_num++;
	}

	if (type == FLUSH_INO)
		f2fs_set_bit(devidx, (char *)&e->dirty_device);

	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}

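/*
 * Design note: the slab object and the radix tree node are both allocated
 * before im->ino_lock is taken (f2fs_kmem_cache_alloc() with GFP_NOFS,
 * radix_tree_preload() with __GFP_NOFAIL), so the insertion under the
 * spinlock never sleeps and is not allowed to fail.
 */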
static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e) {
		list_del(&e->list);
		radix_tree_delete(&im->ino_root, ino);
		im->ino_num--;
		spin_unlock(&im->ino_lock);
		kmem_cache_free(ino_entry_slab, e);
		return;
	}
	spin_unlock(&im->ino_lock);
}

void f2fs_add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, 0, type);
}

void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}

/* mode should be APPEND_INO, UPDATE_INO or TRANS_DIR_INO */
bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);
	return e ? true : false;
}

void f2fs_release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}

void f2fs_set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	__add_ino_entry(sbi, ino, devidx, type);
}

bool f2fs_is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;
	bool is_dirty = false;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (e && f2fs_test_bit(devidx, (char *)&e->dirty_device))
		is_dirty = true;
	spin_unlock(&im->ino_lock);
	return is_dirty;
}

int f2fs_acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		f2fs_show_injection_info(sbi, FAULT_ORPHAN);
		return -ENOSPC;
	}

	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}

void f2fs_release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}

void f2fs_add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
	f2fs_update_inode_page(inode);
}

void f2fs_remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}

static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;
	int err;

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * It is a bug if we cannot find the entry for the
		 * orphan inode here.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	err = dquot_initialize(inode);
	if (err) {
		iput(inode);
		goto err_out;
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	err = f2fs_get_node_info(sbi, ino, &ni);
	if (err)
		goto err_out;

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		err = -EIO;
		goto err_out;
	}
	return 0;

err_out:
	set_sbi_flag(sbi, SBI_NEED_FSCK);
	f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.",
		  __func__, ino);
	return err;
}

int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	unsigned int s_flags = sbi->sb->s_flags;
	int err = 0;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	if (bdev_read_only(sbi->sb->s_bdev)) {
		f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
		return 0;
	}

	if (s_flags & SB_RDONLY) {
		f2fs_info(sbi, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;

	/*
	 * Turn on quotas which were not enabled for read-only mounts if
	 * filesystem has quota feature, so that they are updated correctly.
	 */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page;
		struct f2fs_orphan_block *orphan_blk;

		page = f2fs_get_meta_page(sbi, start_blk + i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto out;
		}

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);

			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				goto out;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
	set_sbi_flag(sbi, SBI_IS_RECOVERED);

#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return err;
}

static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
	 * And, spin_lock should be avoided due to page operations below.
	 */
	head = &im->ino_list;

	/* loop over each orphan inode entry and write them into journal blocks */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = f2fs_grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * an orphan block is full of 1020 entries,
			 * then we need to flush current orphan blocks
			 * and bring another one in memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

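/*
 * For reference, struct f2fs_orphan_block is an array of up to
 * F2FS_ORPHANS_PER_BLOCK little-endian inode numbers followed by a footer:
 * blk_addr (1-based index of this block in the set), blk_count (total
 * orphan blocks in this checkpoint pack) and entry_count (entries used in
 * this block), which is exactly what the loop above fills in.
 */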
static __u32 f2fs_checkpoint_chksum(struct f2fs_sb_info *sbi,
						struct f2fs_checkpoint *ckpt)
{
	unsigned int chksum_ofs = le32_to_cpu(ckpt->checksum_offset);
	__u32 chksum;

	chksum = f2fs_crc32(sbi, ckpt, chksum_ofs);
	if (chksum_ofs < CP_CHKSUM_OFFSET) {
		chksum_ofs += sizeof(chksum);
		chksum = f2fs_chksum(sbi, chksum, (__u8 *)ckpt + chksum_ofs,
						F2FS_BLKSIZE - chksum_ofs);
	}
	return chksum;
}

static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
		struct f2fs_checkpoint **cp_block, struct page **cp_page,
		unsigned long long *version)
{
	size_t crc_offset = 0;
	__u32 crc;

	*cp_page = f2fs_get_meta_page(sbi, cp_addr);
	if (IS_ERR(*cp_page))
		return PTR_ERR(*cp_page);

	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);

	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
	if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
			crc_offset > CP_CHKSUM_OFFSET) {
		f2fs_put_page(*cp_page, 1);
		f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
		return -EINVAL;
	}

	crc = f2fs_checkpoint_chksum(sbi, *cp_block);
	if (crc != cur_cp_crc(*cp_block)) {
		f2fs_put_page(*cp_page, 1);
		f2fs_warn(sbi, "invalid crc value");
		return -EINVAL;
	}

	*version = cur_cp_version(*cp_block);
	return 0;
}

static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
	struct f2fs_checkpoint *cp_block = NULL;
	unsigned long long cur_version = 0, pre_version = 0;
	int err;

	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_1, version);
	if (err)
		return NULL;

	if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
					sbi->blocks_per_seg) {
		f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
			  le32_to_cpu(cp_block->cp_pack_total_block_count));
		goto invalid_cp;
	}
	pre_version = *version;

	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_2, version);
	if (err)
		goto invalid_cp;
	cur_version = *version;

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
	f2fs_put_page(cp_page_2, 1);
invalid_cp:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}

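/*
 * Shadow-pack scheme, in short: two checkpoint packs alternate at fixed
 * disk locations. A pack is self-consistent only when the version stamped
 * in its first block matches the one in its last block (both CRC-checked
 * above); mount then picks whichever consistent pack carries the newer
 * version, so a torn checkpoint write falls back to the older pack.
 */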
int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;
	int err;

	sbi->ckpt = f2fs_kvzalloc(sbi, array_size(blk_size, cp_blks),
				  GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding a valid cp block involves reading both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		err = -EFSCORRUPTED;
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	if (cur_page == cp1)
		sbi->cur_cp_pack = 1;
	else
		sbi->cur_cp_pack = 2;

	/* Sanity checking of checkpoint */
	if (f2fs_sanity_check_ckpt(sbi)) {
		err = -EFSCORRUPTED;
		goto free_fail_no_cp;
	}

	if (cp_blks <= 1)
		goto done;

	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
		if (IS_ERR(cur_page)) {
			err = PTR_ERR(cur_page);
			goto free_fail_no_cp;
		}
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

free_fail_no_cp:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
fail_no_cp:
	kvfree(sbi->ckpt);
	return err;
}

static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (is_inode_flag_set(inode, flag))
		return;

	set_inode_flag(inode, flag);
	if (!f2fs_is_volatile_file(inode))
		list_add_tail(&F2FS_I(inode)->dirty_list,
						&sbi->inode_list[type]);
	stat_inc_dirty_inode(sbi, type);
}

static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
	int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;

	if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
		return;

	list_del_init(&F2FS_I(inode)->dirty_list);
	clear_inode_flag(inode, flag);
	stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}

void f2fs_update_dirty_page(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	spin_lock(&sbi->inode_lock[type]);
	if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
		__add_dirty_inode(inode, type);
	inode_inc_dirty_pages(inode);
	spin_unlock(&sbi->inode_lock[type]);

	set_page_private_reference(page);
}

void f2fs_remove_dirty_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum inode_type type = S_ISDIR(inode->i_mode) ? DIR_INODE : FILE_INODE;

	if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) &&
			!S_ISLNK(inode->i_mode))
		return;

	if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH))
		return;

	spin_lock(&sbi->inode_lock[type]);
	__remove_dirty_inode(inode, type);
	spin_unlock(&sbi->inode_lock[type]);
}

int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);
	unsigned long ino = 0;

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	if (unlikely(f2fs_cp_error(sbi))) {
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return -EIO;
	}

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
	inode = igrab(&fi->vfs_inode);
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		unsigned long cur_ino = inode->i_ino;

		F2FS_I(inode)->cp_task = current;

		filemap_fdatawrite(inode->i_mapping);

		F2FS_I(inode)->cp_task = NULL;

		iput(inode);
		/* We need to give the CPU to other writers. */
		if (ino == cur_ino)
			cond_resched();
		else
			ino = cur_ino;
	} else {
		/*
		 * We should submit the bio, since several dentry pages
		 * under writeback exist in the freeing inode.
		 */
		f2fs_submit_merged_write(sbi, DATA);
		cond_resched();
	}
	goto retry;
}

int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &sbi->inode_list[DIRTY_META];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);

	while (total--) {
		if (unlikely(f2fs_cp_error(sbi)))
			return -EIO;

		spin_lock(&sbi->inode_lock[DIRTY_META]);
		if (list_empty(head)) {
			spin_unlock(&sbi->inode_lock[DIRTY_META]);
			return 0;
		}
		fi = list_first_entry(head, struct f2fs_inode_info,
							gdirty_list);
		inode = igrab(&fi->vfs_inode);
		spin_unlock(&sbi->inode_lock[DIRTY_META]);
		if (inode) {
			sync_inode_metadata(inode, 0);

			/* it's on eviction */
			if (is_inode_flag_set(inode, FI_DIRTY_INODE))
				f2fs_update_inode_page(inode);
			iput(inode);
		}
	}
	return 0;
}

static void __prepare_cp_block(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	nid_t last_nid = nm_i->next_scan_nid;

	next_free_nid(sbi, &last_nid);
	ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi));
	ckpt->valid_node_count = cpu_to_le32(valid_node_count(sbi));
	ckpt->valid_inode_count = cpu_to_le32(valid_inode_count(sbi));
	ckpt->next_free_nid = cpu_to_le32(last_nid);
}

static bool __need_flush_quota(struct f2fs_sb_info *sbi)
{
	bool ret = false;

	if (!is_journalled_quota(sbi))
		return false;

	down_write(&sbi->quota_sem);
	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
		ret = false;
	} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
		clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
		ret = true;
	} else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
		ret = true;
	}
	up_write(&sbi->quota_sem);
	return ret;
}

/*
 * Freeze all the FS-operations for checkpoint.
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	int err = 0, cnt = 0;

	/*
	 * Let's flush inline_data in dirty node pages.
	 */
	f2fs_flush_inline_data(sbi);

retry_flush_quotas:
	f2fs_lock_all(sbi);
	if (__need_flush_quota(sbi)) {
		int locked;

		if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
			set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
			set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
			goto retry_flush_dents;
		}
		f2fs_unlock_all(sbi);

		/* only failed during mount/umount/freeze/quotactl */
		locked = down_read_trylock(&sbi->sb->s_umount);
		f2fs_quota_sync(sbi->sb, -1);
		if (locked)
			up_read(&sbi->sb->s_umount);
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_dents:
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		f2fs_unlock_all(sbi);
		err = f2fs_sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush. inode->i_blocks can be updated.
	 */
	down_write(&sbi->node_change);

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		up_write(&sbi->node_change);
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			return err;
		cond_resched();
		goto retry_flush_quotas;
	}

retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		up_write(&sbi->node_write);
		atomic_inc(&sbi->wb_sync_req[NODE]);
		err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		atomic_dec(&sbi->wb_sync_req[NODE]);
		if (err) {
			up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
			return err;
		}
		cond_resched();
		goto retry_flush_nodes;
	}

	/*
	 * sbi->node_change is used only for AIO write_begin path which produces
	 * dirty node blocks and some checkpoint values by block allocation.
	 */
	__prepare_cp_block(sbi);
	up_write(&sbi->node_change);
	return err;
}

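/*
 * The retry cascade above deliberately restarts from the top: flushing
 * quotas can dirty dentry pages, flushing dentries can dirty inode meta,
 * and flushing inode meta can dirty node pages again, so every stage that
 * finds work drops f2fs_lock_all() and loops until all lists drain while
 * the lock is held.
 */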
static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}

void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type)
{
	DEFINE_WAIT(wait);

	for (;;) {
		if (!get_pages(sbi, type))
			break;

		if (unlikely(f2fs_cp_error(sbi)))
			break;

		if (type == F2FS_DIRTY_META)
			f2fs_sync_meta_pages(sbi, META, LONG_MAX,
							FS_CP_META_IO);
		else if (type == F2FS_WB_CP_DATA)
			f2fs_submit_merged_write(sbi, DATA);

		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(DEFAULT_IO_TIMEOUT);
	}
	finish_wait(&sbi->cp_wait, &wait);
}

static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num;
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long flags;

	spin_lock_irqsave(&sbi->cp_lock, flags);

	if ((cpc->reason & CP_UMOUNT) &&
			le32_to_cpu(ckpt->cp_pack_total_block_count) >
			sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks)
		disable_nat_bits(sbi, false);

	if (cpc->reason & CP_TRIMMED)
		__set_ckpt_flags(ckpt, CP_TRIMMED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_TRIMMED_FLAG);

	if (cpc->reason & CP_UMOUNT)
		__set_ckpt_flags(ckpt, CP_UMOUNT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG);

	if (cpc->reason & CP_FASTBOOT)
		__set_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_FASTBOOT_FLAG);

	if (orphan_num)
		__set_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		__set_ckpt_flags(ckpt, CP_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
		__set_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_RESIZEFS_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
		__set_ckpt_flags(ckpt, CP_DISABLED_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_FLAG);

	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK))
		__set_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_DISABLED_QUICK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
		__set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);

	/* set this flag to activate crc|cp_ver for recovery */
	__set_ckpt_flags(ckpt, CP_CRC_RECOVERY_FLAG);
	__clear_ckpt_flags(ckpt, CP_NOCRC_RECOVERY_FLAG);

	spin_unlock_irqrestore(&sbi->cp_lock, flags);
}

static void commit_checkpoint(struct f2fs_sb_info *sbi,
	void *src, block_t blk_addr)
{
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};

	/*
	 * pagevec_lookup_tag and lock_page again will take
	 * some extra time. Therefore, f2fs_update_meta_pages and
	 * f2fs_sync_meta_pages are combined in this function.
	 */
	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
	int err;

	f2fs_wait_on_page_writeback(page, META, true, true);

	memcpy(page_address(page), src, PAGE_SIZE);

	set_page_dirty(page);
	if (unlikely(!clear_page_dirty_for_io(page)))
		f2fs_bug_on(sbi, 1);

	/* writeout cp pack 2 page */
	err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
	if (unlikely(err && f2fs_cp_error(sbi))) {
		f2fs_put_page(page, 1);
		return;
	}

	f2fs_bug_on(sbi, err);
	f2fs_put_page(page, 0);

	/* submit checkpoint (with barrier if NOBARRIER is not set) */
	f2fs_submit_merged_write(sbi, META_FLUSH);
}

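/*
 * commit_checkpoint() writes only the cp pack 2 (footer) page; everything
 * else in the pack has been submitted and waited on by the time it runs.
 * The footer goes out via META_FLUSH, which, unless the "nobarrier" mount
 * option is set, issues it with a preflush and FUA, so the footer acts as
 * the commit record that atomically validates the whole pack.
 */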
static inline u64 get_sectors_written(struct block_device *bdev)
{
	return (u64)part_stat_read(bdev, sectors[STAT_WRITE]);
}

u64 f2fs_get_sectors_written(struct f2fs_sb_info *sbi)
{
	if (f2fs_is_multi_device(sbi)) {
		u64 sectors = 0;
		int i;

		for (i = 0; i < sbi->s_ndevs; i++)
			sectors += get_sectors_written(FDEV(i).bdev);

		return sectors;
	}

	return get_sectors_written(sbi->sb->s_bdev);
}

static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned long orphan_num = sbi->im[ORPHAN_INO].ino_num, flags;
	block_t start_blk;
	unsigned int data_sum_blocks, orphan_blocks;
	__u32 crc32 = 0;
	int i;
	int cp_payload_blks = __cp_payload(sbi);
	struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
	u64 kbytes_written;
	int err;

	/* Flush all the NAT/SIT pages */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);

	/* start to update checkpoint, cp ver is already updated previously */
	ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi, true));
	ckpt->free_segment_count = cpu_to_le32(free_segments(sbi));
	for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
		ckpt->cur_node_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE));
		ckpt->cur_node_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_NODE));
		ckpt->alloc_type[i + CURSEG_HOT_NODE] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_NODE);
	}
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		ckpt->cur_data_segno[i] =
			cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA));
		ckpt->cur_data_blkoff[i] =
			cpu_to_le16(curseg_blkoff(sbi, i + CURSEG_HOT_DATA));
		ckpt->alloc_type[i + CURSEG_HOT_DATA] =
				curseg_alloc_type(sbi, i + CURSEG_HOT_DATA);
	}

	/* 2 cp + n data seg summary + orphan inode blocks */
	data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false);
	spin_lock_irqsave(&sbi->cp_lock, flags);
	if (data_sum_blocks < NR_CURSEG_DATA_TYPE)
		__set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	else
		__clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG);
	spin_unlock_irqrestore(&sbi->cp_lock, flags);

	orphan_blocks = GET_ORPHAN_BLOCKS(orphan_num);
	ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks +
			orphan_blocks);

	if (__remain_node_summaries(cpc->reason))
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks + NR_CURSEG_NODE_TYPE);
	else
		ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS +
				cp_payload_blks + data_sum_blocks +
				orphan_blocks);

	/* update ckpt flag for checkpoint */
	update_ckpt_flags(sbi, cpc);

	/* update SIT/NAT bitmap */
	get_sit_bitmap(sbi, __bitmap_ptr(sbi, SIT_BITMAP));
	get_nat_bitmap(sbi, __bitmap_ptr(sbi, NAT_BITMAP));

	crc32 = f2fs_checkpoint_chksum(sbi, ckpt);
	*((__le32 *)((unsigned char *)ckpt +
				le32_to_cpu(ckpt->checksum_offset)))
				= cpu_to_le32(crc32);

	start_blk = __start_cp_next_addr(sbi);

	/* write nat bits */
	if (enabled_nat_bits(sbi, cpc)) {
		__u64 cp_ver = cur_cp_version(ckpt);
		block_t blk;

		cp_ver |= ((__u64)crc32 << 32);
		*(__le64 *)nm_i->nat_bits = cpu_to_le64(cp_ver);

		blk = start_blk + sbi->blocks_per_seg - nm_i->nat_bits_blocks;
		for (i = 0; i < nm_i->nat_bits_blocks; i++)
			f2fs_update_meta_page(sbi, nm_i->nat_bits +
					(i << F2FS_BLKSIZE_BITS), blk + i);
	}

	/* write out checkpoint buffer at block 0 */
	f2fs_update_meta_page(sbi, ckpt, start_blk++);

	for (i = 1; i < 1 + cp_payload_blks; i++)
		f2fs_update_meta_page(sbi, (char *)ckpt + i * F2FS_BLKSIZE,
							start_blk++);

	if (orphan_num) {
		write_orphan_inodes(sbi, start_blk);
		start_blk += orphan_blocks;
	}

	f2fs_write_data_summaries(sbi, start_blk);
	start_blk += data_sum_blocks;

	/* Record write statistics in the hot node summary */
	kbytes_written = sbi->kbytes_written;
	kbytes_written += (f2fs_get_sectors_written(sbi) -
				sbi->sectors_written_start) >> 1;
	seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);

	if (__remain_node_summaries(cpc->reason)) {
		f2fs_write_node_summaries(sbi, start_blk);
		start_blk += NR_CURSEG_NODE_TYPE;
	}

	/* update user_block_counts */
	sbi->last_valid_block_count = sbi->total_valid_block_count;
	percpu_counter_set(&sbi->alloc_valid_block_count, 0);

	/* Here, we have one bio having CP pack except cp pack 2 page */
	f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
	/* Wait for all dirty meta pages to be submitted for IO */
	f2fs_wait_on_all_pages(sbi, F2FS_DIRTY_META);

	/* wait for previous submitted meta pages writeback */
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/* flush all device cache */
	err = f2fs_flush_device_cache(sbi);
	if (err)
		return err;

	/* barrier and flush checkpoint cp pack 2 page if it can */
	commit_checkpoint(sbi, ckpt, start_blk);
	f2fs_wait_on_all_pages(sbi, F2FS_WB_CP_DATA);

	/*
	 * invalidate intermediate page cache borrowed from meta inode which are
	 * used for migration of encrypted, verity or compressed inode's blocks.
	 */
	if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) ||
		f2fs_sb_has_compression(sbi))
		invalidate_mapping_pages(META_MAPPING(sbi),
				MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);

	f2fs_release_ino_entry(sbi, false);

	f2fs_reset_fsync_node_info(sbi);

	clear_sbi_flag(sbi, SBI_IS_DIRTY);
	clear_sbi_flag(sbi, SBI_NEED_CP);
	clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);

	spin_lock(&sbi->stat_lock);
	sbi->unusable_block_count = 0;
	spin_unlock(&sbi->stat_lock);

	__set_cp_next_pack(sbi);

	/*
	 * redirty superblock if metadata like node page or inode cache is
	 * updated during writing checkpoint.
	 */
	if (get_pages(sbi, F2FS_DIRTY_NODES) ||
			get_pages(sbi, F2FS_DIRTY_IMETA))
		set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));

	return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}

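/*
 * Rough layout of the checkpoint pack assembled by do_checkpoint():
 *	cp block 1 | cp payload | orphan blocks | data summaries |
 *	node summaries (kept for umount/fastboot only) | cp block 2
 * with the packed NAT bits, when enabled, stashed at the tail of the
 * checkpoint segment.
 */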
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;
	int err = 0;

	if (f2fs_readonly(sbi->sb) || f2fs_hw_is_readonly(sbi))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		if (cpc->reason != CP_PAUSE)
			return 0;
		f2fs_warn(sbi, "Start checkpoint disabled!");
	}
	if (cpc->reason != CP_RESIZE)
		down_write(&sbi->cp_global_sem);

	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
		((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
		goto out;
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;
	}

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	err = block_operations(sbi);
	if (err)
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

	f2fs_flush_merged_writes(sbi);

	/* this is the case of multiple fstrims without any changes */
	if (cpc->reason & CP_DISCARD) {
		if (!f2fs_exist_trim_candidates(sbi, cpc)) {
			unblock_operations(sbi);
			goto out;
		}

		if (NM_I(sbi)->nat_cnt[DIRTY_NAT] == 0 &&
				SIT_I(sbi)->dirty_sentries == 0 &&
				prefree_segments(sbi) == 0) {
			f2fs_flush_sit_entries(sbi, cpc);
			f2fs_clear_prefree_segments(sbi, cpc);
			unblock_operations(sbi);
			goto out;
		}
	}

	/*
	 * update checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written at correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	err = f2fs_flush_nat_entries(sbi, cpc);
	if (err) {
		f2fs_err(sbi, "f2fs_flush_nat_entries failed err:%d, stop checkpoint", err);
		f2fs_bug_on(sbi, !f2fs_cp_error(sbi));
		goto stop;
	}

	f2fs_flush_sit_entries(sbi, cpc);

	/* save inmem log status */
	f2fs_save_inmem_curseg(sbi);

	err = do_checkpoint(sbi, cpc);
	if (err) {
		f2fs_err(sbi, "do_checkpoint failed err:%d, stop checkpoint", err);
		f2fs_bug_on(sbi, !f2fs_cp_error(sbi));
		f2fs_release_discard_addrs(sbi);
	} else {
		f2fs_clear_prefree_segments(sbi, cpc);
	}

	f2fs_restore_inmem_curseg(sbi);
stop:
	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason & CP_RECOVERY)
		f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);

	/* update CP_TIME to trigger checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
	if (cpc->reason != CP_RESIZE)
		up_write(&sbi->cp_global_sem);
	return err;
}

void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		INIT_RADIX_TREE(&im->ino_root, GFP_ATOMIC);
		spin_lock_init(&im->ino_lock);
		INIT_LIST_HEAD(&im->ino_list);
		im->ino_num = 0;
	}

	sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS -
			NR_CURSEG_PERSIST_TYPE - __cp_payload(sbi)) *
				F2FS_ORPHANS_PER_BLOCK;
}

int __init f2fs_create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
			sizeof(struct ino_entry));
	if (!ino_entry_slab)
		return -ENOMEM;
	f2fs_inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
			sizeof(struct inode_entry));
	if (!f2fs_inode_entry_slab) {
		kmem_cache_destroy(ino_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void f2fs_destroy_checkpoint_caches(void)
{
	kmem_cache_destroy(ino_entry_slab);
	kmem_cache_destroy(f2fs_inode_entry_slab);
}

static int __write_checkpoint_sync(struct f2fs_sb_info *sbi)
{
	struct cp_control cpc = { .reason = CP_SYNC, };
	int err;

	down_write(&sbi->gc_lock);
	err = f2fs_write_checkpoint(sbi, &cpc);
	up_write(&sbi->gc_lock);

	return err;
}

static void __checkpoint_and_complete_reqs(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req *req, *next;
	struct llist_node *dispatch_list;
	u64 sum_diff = 0, diff, count = 0;
	int ret;

	dispatch_list = llist_del_all(&cprc->issue_list);
	if (!dispatch_list)
		return;
	dispatch_list = llist_reverse_order(dispatch_list);

	ret = __write_checkpoint_sync(sbi);
	atomic_inc(&cprc->issued_ckpt);

	llist_for_each_entry_safe(req, next, dispatch_list, llnode) {
		diff = (u64)ktime_ms_delta(ktime_get(), req->queue_time);
		req->ret = ret;
		complete(&req->wait);

		sum_diff += diff;
		count++;
	}
	atomic_sub(count, &cprc->queued_ckpt);
	atomic_add(count, &cprc->total_ckpt);

	spin_lock(&cprc->stat_lock);
	cprc->cur_time = (unsigned int)div64_u64(sum_diff, count);
	if (cprc->peak_time < cprc->cur_time)
		cprc->peak_time = cprc->cur_time;
	spin_unlock(&cprc->stat_lock);
}

static int issue_checkpoint_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	wait_queue_head_t *q = &cprc->ckpt_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&cprc->issue_list))
		__checkpoint_and_complete_reqs(sbi);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&cprc->issue_list));
	goto repeat;
}

static void flush_remained_ckpt_reqs(struct f2fs_sb_info *sbi,
		struct ckpt_req *wait_req)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (!llist_empty(&cprc->issue_list)) {
		__checkpoint_and_complete_reqs(sbi);
	} else {
		/* already dispatched by issue_checkpoint_thread */
		if (wait_req)
			wait_for_completion(&wait_req->wait);
	}
}

static void init_ckpt_req(struct ckpt_req *req)
{
	memset(req, 0, sizeof(struct ckpt_req));

	init_completion(&req->wait);
	req->queue_time = ktime_get();
}

int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;
	struct ckpt_req req;
	struct cp_control cpc;

	cpc.reason = __get_cp_reason(sbi);
	if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) {
		int ret;

		down_write(&sbi->gc_lock);
		ret = f2fs_write_checkpoint(sbi, &cpc);
		up_write(&sbi->gc_lock);

		return ret;
	}

	if (!cprc->f2fs_issue_ckpt)
		return __write_checkpoint_sync(sbi);

	init_ckpt_req(&req);

	llist_add(&req.llnode, &cprc->issue_list);
	atomic_inc(&cprc->queued_ckpt);

	/*
	 * update issue_list before we wake up issue_checkpoint thread,
	 * this smp_mb() pairs with another barrier in ___wait_event(),
	 * see more details in comments of waitqueue_active().
	 */
	smp_mb();

	if (waitqueue_active(&cprc->ckpt_wait_queue))
		wake_up(&cprc->ckpt_wait_queue);

	if (cprc->f2fs_issue_ckpt)
		wait_for_completion(&req.wait);
	else
		flush_remained_ckpt_reqs(sbi, &req);

	return req.ret;
}

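/*
 * Checkpoint merging, in effect: with the "checkpoint_merge" mount option,
 * concurrent CP_SYNC requests (e.g. many fsync() callers) are queued on
 * issue_list and collapsed by the kthread into a single
 * __write_checkpoint_sync() call; every queued waiter then completes with
 * that one checkpoint's return code.
 */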
int f2fs_start_ckpt_thread(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (cprc->f2fs_issue_ckpt)
		return 0;

	cprc->f2fs_issue_ckpt = kthread_run(issue_checkpoint_thread, sbi,
			"f2fs_ckpt-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(cprc->f2fs_issue_ckpt)) {
		cprc->f2fs_issue_ckpt = NULL;
		return -ENOMEM;
	}

	set_task_ioprio(cprc->f2fs_issue_ckpt, cprc->ckpt_thread_ioprio);

	return 0;
}

void f2fs_stop_ckpt_thread(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	if (cprc->f2fs_issue_ckpt) {
		struct task_struct *ckpt_task = cprc->f2fs_issue_ckpt;

		cprc->f2fs_issue_ckpt = NULL;
		kthread_stop(ckpt_task);

		flush_remained_ckpt_reqs(sbi, NULL);
	}
}

void f2fs_init_ckpt_req_control(struct f2fs_sb_info *sbi)
{
	struct ckpt_req_control *cprc = &sbi->cprc_info;

	atomic_set(&cprc->issued_ckpt, 0);
	atomic_set(&cprc->total_ckpt, 0);
	atomic_set(&cprc->queued_ckpt, 0);
	cprc->ckpt_thread_ioprio = DEFAULT_CHECKPOINT_IOPRIO;
	init_waitqueue_head(&cprc->ckpt_wait_queue);
	init_llist_head(&cprc->issue_list);
	spin_lock_init(&cprc->stat_lock);
}