// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}
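/*
 * Illustrative example: on a 64-bit machine, a bitmap whose first byte
 * is 0x80 (i.e. f2fs bit 0 set) is turned by __reverse_ulong() into
 * tmp = 0x8000000000000000, so the search helpers below can treat the
 * word with plain MSB-first arithmetic.
 */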
/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of unsigned long.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
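/*
 * Worked example (illustrative): if the first bitmap byte is 0x20, only
 * f2fs bit 2 is set (f2fs_set_bit counts from the MSB of each byte), so
 * __find_rev_next_bit(addr, 64, 0) returns 2 and
 * __find_rev_next_zero_bit(addr, 64, 2) returns 3.
 */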
static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}
bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (f2fs_lfs_mode(sbi))
		return false;
	if (sbi->gc_mode == GC_URGENT_HIGH)
		return true;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}
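/*
 * Atomic write machinery, in brief: pages written to an atomic-opened
 * file are registered below on fi->inmem_pages; committing walks that
 * list, writes the pages back and records each old block address so a
 * failed commit can be revoked; dropping simply throws the queued
 * pages away.
 */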
void f2fs_register_inmem_page(struct inode *inode, struct page *page)
{
	struct inmem_pages *new;

	f2fs_set_page_private(page, ATOMIC_WRITTEN_PAGE);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	get_page(page);
	mutex_lock(&F2FS_I(inode)->inmem_lock);
	list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&F2FS_I(inode)->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}

static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover,
				bool trylock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		if (trylock) {
			/*
			 * to avoid deadlock in between page lock and
			 * inmem_lock.
			 */
			if (!trylock_page(page))
				continue;
		} else {
			lock_page(page);
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
retry:
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			err = f2fs_get_dnode_of_data(&dn, page->index,
								LOOKUP_NODE);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					cond_resched();
					goto retry;
				}
				err = -EAGAIN;
				goto next;
			}

			err = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (err) {
				f2fs_put_dnode(&dn);
				return err;
			}

			if (cur->old_addr == NEW_ADDR) {
				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			} else
				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful case */
		if (drop || recover) {
			ClearPageUptodate(page);
			clear_cold_data(page);
		}
		f2fs_clear_page_private(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}
void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
{
	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	unsigned int count = sbi->atomic_files;
	unsigned int looped = 0;
next:
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
		return;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
	inode = igrab(&fi->vfs_inode);
	if (inode)
		list_move_tail(&fi->inmem_ilist, head);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	if (inode) {
		if (gc_failure) {
			if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
				goto skip;
		}
		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		f2fs_drop_inmem_pages(inode);
skip:
		iput(inode);
	}
	congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
	cond_resched();
	if (gc_failure) {
		if (++looped >= count)
			return;
	}
	goto next;
}

void f2fs_drop_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);

	while (!list_empty(&fi->inmem_pages)) {
		mutex_lock(&fi->inmem_lock);
		__revoke_inmem_pages(inode, &fi->inmem_pages,
						true, false, true);
		mutex_unlock(&fi->inmem_lock);
	}

	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (!list_empty(&fi->inmem_ilist))
		list_del_init(&fi->inmem_ilist);
	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		sbi->atomic_files--;
	}
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
}
void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	f2fs_clear_page_private(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}
static int __f2fs_commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.io_type = FS_DATA_IO,
	};
	struct list_head revoke_list;
	bool submit_bio = false;
	int err = 0;

	INIT_LIST_HEAD(&revoke_list);

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			f2fs_wait_on_page_writeback(page, DATA, true, true);

			set_page_dirty(page);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				f2fs_remove_dirty_inode(inode);
			}
retry:
			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = f2fs_do_write_data_page(&fio);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					cond_resched();
					goto retry;
				}
				unlock_page(page);
				break;
			}
			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			submit_bio = true;
		}
		unlock_page(page);
		list_move_tail(&cur->list, &revoke_list);
	}

	if (submit_bio)
		f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);

	if (err) {
		/*
		 * try to revoke all committed pages, but this can still fail
		 * due to lack of memory or some other reason. If that happens,
		 * EAGAIN is returned: the transaction is no longer consistent,
		 * and the caller should use the journal to recover, or rewrite
		 * and commit the last transaction. For any other error number,
		 * revoking was done by the filesystem itself.
		 */
		err = __revoke_inmem_pages(inode, &revoke_list,
						false, true, false);

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages,
						true, false, false);
	} else {
		__revoke_inmem_pages(inode, &revoke_list,
						false, false, false);
	}

	return err;
}
int f2fs_commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	f2fs_balance_fs(sbi, true);

	down_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_lock_op(sbi);
	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __f2fs_commit_inmem_pages(inode);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	up_write(&fi->i_gc_rwsem[WRITE]);

	return err;
}
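/*
 * Note the lock order used by the commit path above: i_gc_rwsem[WRITE],
 * then f2fs_lock_op() (cp_rwsem), then inmem_lock.  FI_ATOMIC_COMMIT is
 * set for the duration so concurrent paths can tell a commit is running.
 */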
/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}

	/* balance_fs_bg is able to be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi, false);

	if (!f2fs_is_checkpoint_ready(sbi))
		return;

	/*
	 * We should do GC or end up with checkpoint, if there are so many dirty
	 * dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		down_write(&sbi->gc_lock);
		f2fs_gc(sbi, false, false, NULL_SEGNO);
	}
}
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return;

	/* try to shrink extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		f2fs_build_free_nids(sbi, false, false);

	if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) ||
		excess_prefree_segs(sbi))
		goto do_sync;

	/* there is background inflight IO, or a recent foreground operation */
	if (is_inflight_io(sbi, REQ_TIME) ||
		(!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem)))
		return;

	/* exceed periodical checkpoint timeout threshold */
	if (f2fs_time_over(sbi, CP_TIME))
		goto do_sync;

	/* checkpoint is the only way to shrink partial cached entries */
	if (f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
		f2fs_available_free_memory(sbi, INO_ENTRIES))
		return;

do_sync:
	if (test_opt(sbi, DATA_FLUSH) && from_bg) {
		struct blk_plug plug;

		mutex_lock(&sbi->flush_lock);

		blk_start_plug(&plug);
		f2fs_sync_dirty_inodes(sbi, FILE_INODE);
		blk_finish_plug(&plug);

		mutex_unlock(&sbi->flush_lock);
	}
	f2fs_sync_fs(sbi->sb, true);
	stat_inc_bg_cp_count(sbi->stat_info);
}
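/*
 * Flush merging, in brief: with FLUSH_MERGE enabled, f2fs_issue_flush()
 * callers push a flush_cmd onto a lockless llist and sleep on a
 * completion, while issue_flush_thread() takes the whole list, issues a
 * single device flush on behalf of every waiter, and completes them all.
 * One preflush thus serves many concurrent fsync() callers.
 */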
static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	int ret = blkdev_issue_flush(bdev);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}
int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		atomic_inc(&fcc->queued_flush);
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
			f2fs_is_multi_device(sbi)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/* update issue_list before we wake up issue_flush thread */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->queued_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->queued_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->queued_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}
int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return err;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->queued_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return err;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}
int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}
static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;

		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
			block_t valid_blocks =
				get_valid_blocks(sbi, segno, true);

			f2fs_bug_on(sbi, unlikely(!valid_blocks ||
					valid_blocks == BLKS_PER_SEC(sbi)));

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t valid_blocks;

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		valid_blocks = get_valid_blocks(sbi, segno, true);
		if (valid_blocks == 0) {
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
#ifdef CONFIG_F2FS_CHECK_FS
			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
#endif
		}
		if (__is_large_section(sbi)) {
			unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);

			if (!valid_blocks ||
					valid_blocks == BLKS_PER_SEC(sbi)) {
				clear_bit(secno, dirty_i->dirty_secmap);
				return;
			}

			if (!IS_CURSEC(sbi, secno))
				set_bit(secno, dirty_i->dirty_secmap);
		}
	}
}
/*
 * No error such as -ENOMEM should occur here.
 * Adding a dirty entry to the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks, ckpt_valid_blocks;
	unsigned int usable_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	usable_blocks = f2fs_usable_blks_in_seg(sbi, segno);
	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);
	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);

	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
		ckpt_valid_blocks == usable_blocks)) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < usable_blocks) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/* This moves currently empty dirty blocks to prefree. Must hold seglist_lock */
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (IS_CURSEG(sbi, segno))
			continue;
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}
block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t holes[2] = {0, 0};	/* DATA and NODE */
	block_t unusable;
	struct seg_entry *se;
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		se = get_seg_entry(sbi, segno);
		if (IS_NODESEG(se->type))
			holes[NODE] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
		else
			holes[DATA] += f2fs_usable_blks_in_seg(sbi, segno) -
							se->valid_blocks;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
	if (unusable > ovp_holes)
		return unusable - ovp_holes;
	return 0;
}

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));

	if (unusable > F2FS_OPTION(sbi).unusable_cap)
		return -EAGAIN;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
		dirty_segments(sbi) > ovp_hole_segs)
		return -EAGAIN;
	return 0;
}

/* This is only used by SBI_CP_DISABLED */
static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = 0;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (get_ckpt_valid_blocks(sbi, segno))
			continue;
		mutex_unlock(&dirty_i->seglist_lock);
		return segno;
	}
	mutex_unlock(&dirty_i->seglist_lock);
	return NULL_SEGNO;
}
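/*
 * Discard command lifecycle, in brief: a discard_cmd starts in D_PREP on
 * a pending list chosen by its length and is indexed by logical start
 * address in an rb-tree so adjacent ranges can be merged.  Submission
 * moves it to a wait list in D_SUBMIT (or D_PARTIAL while split bios are
 * still in flight); the bio completion marks it D_DONE, and waiters then
 * reap and free it.
 */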
static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->queued = 0;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	spin_lock_init(&dc->lock);
	dc->bio_ref = 0;
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);

	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_sub(dc->queued, &dcc->queued_discard);

	list_del(&dc->list);
	rb_erase_cached(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}
static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned long flags;

	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);

	spin_lock_irqsave(&dc->lock, flags);
	if (dc->bio_ref) {
		spin_unlock_irqrestore(&dc->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dc->lock, flags);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		printk_ratelimited(
			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
			KERN_INFO, sbi->sb->s_id,
			dc->lstart, dc->start, dc->len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
	unsigned long flags;

	spin_lock_irqsave(&dc->lock, flags);
	if (!dc->error)
		dc->error = blk_status_to_errno(bio->bi_status);
	dc->bio_ref--;
	if (!dc->bio_ref && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);
	}
	spin_unlock_irqrestore(&dc->lock, flags);
	bio_put(bio);
}
static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
	unsigned long *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = max_blocks;
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}
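/*
 * Rough summary of the discard policies set up below: DPOLICY_BG issues
 * in the background and backs off when the device is busy, DPOLICY_FORCE
 * issues aggressively regardless of idleness, DPOLICY_FSTRIM serves
 * FITRIM synchronously, and DPOLICY_UMOUNT drains everything at
 * granularity 1 (with a timeout) so CP_TRIMMED_FLAG can be set.
 */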
static void __init_discard_policy(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->ordered = false;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
	dpolicy->io_aware_gran = MAX_PLIST_NUM;
	dpolicy->timeout = false;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = true;
		dpolicy->sync = false;
		dpolicy->ordered = true;
		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
			dpolicy->granularity = 1;
			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		}
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->io_aware = false;
		/* we need to issue all to keep CP_TRIMMED_FLAG */
		dpolicy->granularity = 1;
		dpolicy->timeout = true;
	}
}
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len);
/* This function was modeled on blkdev_issue_discard() in block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						struct discard_cmd *dc,
						unsigned int *issued)
{
	struct block_device *bdev = dc->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	int flag = dpolicy->sync ? REQ_SYNC : 0;
	block_t lstart, start, len, total_len;
	int err = 0;

	if (dc->state != D_PREP)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return 0;

	trace_f2fs_issue_discard(bdev, dc->start, dc->len);

	lstart = dc->lstart;
	start = dc->start;
	len = dc->len;
	total_len = len;

	dc->len = 0;

	while (total_len && *issued < dpolicy->max_requests && !err) {
		struct bio *bio = NULL;
		unsigned long flags;
		bool last = true;

		if (len > max_discard_blocks) {
			len = max_discard_blocks;
			last = false;
		}

		(*issued)++;
		if (*issued == dpolicy->max_requests)
			last = true;

		dc->len += len;

		if (time_to_inject(sbi, FAULT_DISCARD)) {
			f2fs_show_injection_info(sbi, FAULT_DISCARD);
			err = -EIO;
			goto submit;
		}
		err = __blkdev_issue_discard(bdev,
					SECTOR_FROM_BLOCK(start),
					SECTOR_FROM_BLOCK(len),
					GFP_NOFS, 0, &bio);
submit:
		if (err) {
			spin_lock_irqsave(&dc->lock, flags);
			if (dc->state == D_PARTIAL)
				dc->state = D_SUBMIT;
			spin_unlock_irqrestore(&dc->lock, flags);

			break;
		}

		f2fs_bug_on(sbi, !bio);

		/*
		 * should keep before submission to avoid D_DONE
		 * right away
		 */
		spin_lock_irqsave(&dc->lock, flags);
		if (last)
			dc->state = D_SUBMIT;
		else
			dc->state = D_PARTIAL;
		dc->bio_ref++;
		spin_unlock_irqrestore(&dc->lock, flags);

		atomic_inc(&dcc->queued_discard);
		dc->queued++;
		list_move_tail(&dc->list, wait_list);

		/* sanity check on discard range */
		__check_sit_bitmap(sbi, lstart, lstart + len);

		bio->bi_private = dc;
		bio->bi_end_io = f2fs_submit_discard_endio;
		bio->bi_opf |= flag;
		submit_bio(bio);

		atomic_inc(&dcc->issued_discard);

		f2fs_update_iostat(sbi, FS_DISCARD, 1);

		lstart += len;
		start += len;
		total_len -= len;
		len = total_len;
	}

	if (!err && len) {
		dcc->undiscard_blks -= len;
		__update_discard_tree_range(sbi, bdev, lstart, start, len);
	}
	return err;
}
static void __insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	bool leftmost = true;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
							lstart, &leftmost);
do_insert:
	__attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
								p, leftmost);
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}
static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->len = blkaddr - dc->lstart;
		dcc->undiscard_blks += dc->len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr,
					NULL, NULL);
		} else {
			dc->lstart++;
			dc->len--;
			dc->start++;
			dcc->undiscard_blks += dc->len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}
static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
	block_t end = lstart + len;

	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, lstart,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->lstart + prev_dc->len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di,
							max_discard_blocks)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di,
							max_discard_blocks)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged) {
			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
							di.len, NULL, NULL);
		}
next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}
}
static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	if (!f2fs_bdev_support_discard(bdev))
		return 0;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (f2fs_is_multi_device(sbi)) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
	return 0;
}
static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	unsigned int pos = dcc->next_pos;
	unsigned int issued = 0;
	bool io_interrupted = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, pos,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc) {
		struct rb_node *node;
		int err = 0;

		if (dc->state != D_PREP)
			goto next;

		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
			io_interrupted = true;
			break;
		}

		dcc->next_pos = dc->lstart + dc->len;
		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);

		if (issued >= dpolicy->max_requests)
			break;
next:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	blk_finish_plug(&plug);

	if (!dc)
		dcc->next_pos = 0;

	mutex_unlock(&dcc->cmd_lock);

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy);
static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, issued;
	bool io_interrupted = false;

	if (dpolicy->timeout)
		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);

retry:
	issued = 0;
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
			break;

		if (i + 1 < dpolicy->granularity)
			break;

		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
			return __issue_discard_cmd_orderly(sbi, dpolicy);

		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		if (unlikely(dcc->rbtree_check))
			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
							&dcc->root, false));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
				break;

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
						!is_idle(sbi, DISCARD_TIME)) {
				io_interrupted = true;
				break;
			}

			__submit_discard_cmd(sbi, dpolicy, dc, &issued);

			if (issued >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (issued >= dpolicy->max_requests || io_interrupted)
			break;
	}

	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
		__wait_all_discard_cmd(sbi, dpolicy);
		goto retry;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}
*sbi
)
1562 struct discard_cmd_control
*dcc
= SM_I(sbi
)->dcc_info
;
1563 struct list_head
*pend_list
;
1564 struct discard_cmd
*dc
, *tmp
;
1566 bool dropped
= false;
1568 mutex_lock(&dcc
->cmd_lock
);
1569 for (i
= MAX_PLIST_NUM
- 1; i
>= 0; i
--) {
1570 pend_list
= &dcc
->pend_list
[i
];
1571 list_for_each_entry_safe(dc
, tmp
, pend_list
, list
) {
1572 f2fs_bug_on(sbi
, dc
->state
!= D_PREP
);
1573 __remove_discard_cmd(sbi
, dc
);
1577 mutex_unlock(&dcc
->cmd_lock
);
1582 void f2fs_drop_discard_cmd(struct f2fs_sb_info
*sbi
)
1584 __drop_discard_cmd(sbi
);
static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}
static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc, *tmp;
	bool need_wait;
	unsigned int trimmed = 0;

next:
	need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (dc->lstart + dc->len <= start || end <= dc->lstart)
			continue;
		if (dc->len < dpolicy->granularity)
			continue;
		if (dc->state == D_DONE && !dc->ref) {
			wait_for_completion_io(&dc->wait);
			if (!dc->error)
				trimmed += dc->len;
			__remove_discard_cmd(sbi, dc);
		} else {
			dc->ref++;
			need_wait = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	struct discard_policy dp;
	unsigned int discard_blks;

	if (dpolicy)
		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);

	/* wait all */
	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);

	return discard_blks;
}
/* This should be covered by global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
							NULL, blkaddr);
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}
void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/* This comes from f2fs_put_super */
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_policy dpolicy;
	bool dropped;

	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
					dcc->discard_granularity);
	__issue_discard_cmd(sbi, &dpolicy);
	dropped = __drop_discard_cmd(sbi);

	/* just to make sure there are no pending discard commands */
	__wait_all_discard_cmd(sbi, NULL);

	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
	return dropped;
}
static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	struct discard_policy dpolicy;
	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
	int issued;

	set_freezable();

	do {
		__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
					dcc->discard_granularity);

		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));

		if (dcc->discard_wake)
			dcc->discard_wake = 0;

		/* clean up pending candidates before going to sleep */
		if (atomic_read(&dcc->queued_discard))
			__wait_all_discard_cmd(sbi, NULL);

		if (try_to_freeze())
			continue;
		if (f2fs_readonly(sbi->sb))
			continue;
		if (kthread_should_stop())
			return 0;
		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
			wait_ms = dpolicy.max_interval;
			continue;
		}

		if (sbi->gc_mode == GC_URGENT_HIGH)
			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, &dpolicy);
		if (issued > 0) {
			__wait_all_discard_cmd(sbi, &dpolicy);
			wait_ms = dpolicy.min_interval;
		} else if (issued == -1) {
			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
			if (!wait_ms)
				wait_ms = dpolicy.mid_interval;
		} else {
			wait_ms = dpolicy.max_interval;
		}

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
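/*
 * On zoned block devices, discarding a whole sequential zone is done by
 * resetting the zone write pointer instead of issuing a discard;
 * conventional zones fall back to the regular discard path below.
 */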
#ifdef CONFIG_BLK_DEV_ZONED
static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	sector_t sector, nr_sects;
	block_t lblkstart = blkstart;
	int devi = 0;

	if (f2fs_is_multi_device(sbi)) {
		devi = f2fs_target_device_index(sbi, blkstart);
		if (blkstart < FDEV(devi).start_blk ||
		    blkstart > FDEV(devi).end_blk) {
			f2fs_err(sbi, "Invalid block %x", blkstart);
			return -EIO;
		}
		blkstart -= FDEV(devi).start_blk;
	}

	/* For sequential zones, reset the zone write pointer */
	if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
		sector = SECTOR_FROM_BLOCK(blkstart);
		nr_sects = SECTOR_FROM_BLOCK(blklen);

		if (sector & (bdev_zone_sectors(bdev) - 1) ||
				nr_sects != bdev_zone_sectors(bdev)) {
			f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
				 devi, sbi->s_ndevs ? FDEV(devi).path : "",
				 blkstart, blklen);
			return -EIO;
		}
		trace_f2fs_issue_reset_zone(bdev, blkstart);
		return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
					sector, nr_sects, GFP_NOFS);
	}

	/* For conventional zones, use regular discard if supported */
	return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
}
#endif

static int __issue_discard_async(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
#ifdef CONFIG_BLK_DEV_ZONED
	if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
		return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
#endif
	return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
}
static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = blkstart, len = 0;
	struct block_device *bdev;
	struct seg_entry *se;
	unsigned int offset;
	block_t i;
	int err = 0;

	bdev = f2fs_target_device(sbi, blkstart, NULL);

	for (i = blkstart; i < blkstart + blklen; i++, len++) {
		struct block_device *bdev2 =
			f2fs_target_device(sbi, i, NULL);

		if (bdev2 != bdev) {
			err = __issue_discard_async(sbi, bdev, start, len);
			if (err)
				return err;
			bdev = bdev2;
			start = i;
			len = 0;
		}

		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}

	if (len)
		err = __issue_discard_async(sbi, bdev, start, len);
	return err;
}
static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
							bool check_only)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason & CP_DISCARD);
	struct discard_entry *de = NULL;
	struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
	int i;

	if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
		return false;

	if (!force) {
		if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
			SM_I(sbi)->dcc_info->nr_discards >=
				SM_I(sbi)->dcc_info->max_discards)
			return false;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->dcc_info->nr_discards <=
				SM_I(sbi)->dcc_info->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
		if (force && start && end != max_blocks
					&& (end - start) < cpc->trim_minlen)
			continue;

		if (check_only)
			return true;

		if (!de) {
			de = f2fs_kmem_cache_alloc(discard_entry_slab,
								GFP_F2FS_ZERO);
			de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
			list_add_tail(&de->list, head);
		}

		for (i = start; i < end; i++)
			__set_bit_le(i, (void *)de->discard_map);

		SM_I(sbi)->dcc_info->nr_discards += end - start;
	}
	return false;
}
static void release_discard_addr(struct discard_entry *entry)
{
	list_del(&entry->list);
	kmem_cache_free(discard_entry_slab, entry);
}

void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list)
		release_discard_addr(entry);
}

/*
 * Should call f2fs_clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno, false);
	mutex_unlock(&dirty_i->seglist_lock);
}
void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
						struct cp_control *cpc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *head = &dcc->entry_list;
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;
	unsigned int secno, start_segno;
	bool force = (cpc->reason & CP_DISCARD);
	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;

		if (need_align && end != -1)
			end--;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		if (need_align) {
			start = rounddown(start, sbi->segs_per_sec);
			end = roundup(end, sbi->segs_per_sec);
		}

		for (i = start; i < end; i++) {
			if (test_and_clear_bit(i, prefree_map))
				dirty_i->nr_dirty[PRE]--;
		}

		if (!f2fs_realtime_discard_enable(sbi))
			continue;

		if (force && start >= cpc->trim_start &&
					(end - 1) <= cpc->trim_end)
			continue;

		if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
			continue;
		}
next:
		secno = GET_SEC_FROM_SEG(sbi, start);
		start_segno = GET_SEG_FROM_SEC(sbi, secno);
		if (!IS_CURSEC(sbi, secno) &&
			!get_valid_blocks(sbi, start, true))
			f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
				sbi->segs_per_sec << sbi->log_blocks_per_seg);

		start = start_segno + sbi->segs_per_sec;
		if (start < end)
			goto next;
		else
			end = start - 1;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		unsigned int cur_pos = 0, next_pos, len, total_len = 0;
		bool is_valid = test_bit_le(0, entry->discard_map);

find_next:
		if (is_valid) {
			next_pos = find_next_zero_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
			len = next_pos - cur_pos;

			if (f2fs_sb_has_blkzoned(sbi) ||
			    (force && len < cpc->trim_minlen))
				goto skip;

			f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
									len);
			total_len += len;
		} else {
			next_pos = find_next_bit_le(entry->discard_map,
					sbi->blocks_per_seg, cur_pos);
		}
skip:
		cur_pos = next_pos;
		is_valid = !is_valid;

		if (cur_pos < sbi->blocks_per_seg)
			goto find_next;

		release_discard_addr(entry);
		dcc->nr_discards -= total_len;
	}

	wake_up_discard_thread(sbi, false);
}
static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct discard_cmd_control *dcc;
	int err = 0, i;

	if (SM_I(sbi)->dcc_info) {
		dcc = SM_I(sbi)->dcc_info;
		goto init_thread;
	}

	dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
	if (!dcc)
		return -ENOMEM;

	dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
	INIT_LIST_HEAD(&dcc->entry_list);
	for (i = 0; i < MAX_PLIST_NUM; i++)
		INIT_LIST_HEAD(&dcc->pend_list[i]);
	INIT_LIST_HEAD(&dcc->wait_list);
	INIT_LIST_HEAD(&dcc->fstrim_list);
	mutex_init(&dcc->cmd_lock);
	atomic_set(&dcc->issued_discard, 0);
	atomic_set(&dcc->queued_discard, 0);
	atomic_set(&dcc->discard_cmd_cnt, 0);
	dcc->nr_discards = 0;
	dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
	dcc->undiscard_blks = 0;
	dcc->next_pos = 0;
	dcc->root = RB_ROOT_CACHED;
	dcc->rbtree_check = false;

	init_waitqueue_head(&dcc->discard_wait_queue);
	SM_I(sbi)->dcc_info = dcc;
init_thread:
	dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
				"f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(dcc->f2fs_issue_discard)) {
		err = PTR_ERR(dcc->f2fs_issue_discard);
		kfree(dcc);
		SM_I(sbi)->dcc_info = NULL;
		return err;
	}

	return err;
}

static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (!dcc)
		return;

	f2fs_stop_discard_thread(sbi);

	/*
	 * Recovery can cache discard commands, so in error path of
	 * fill_super(), it needs to give a chance to handle them.
	 */
	if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
		f2fs_issue_discard_timeout(sbi);

	kfree(dcc);
	SM_I(sbi)->dcc_info = NULL;
}
static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);

	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static inline unsigned long long get_segment_mtime(struct f2fs_sb_info *sbi,
								block_t blkaddr)
{
	unsigned int segno = GET_SEGNO(sbi, blkaddr);

	if (segno == NULL_SEGNO)
		return 0;
	return get_seg_entry(sbi, segno)->mtime;
}
static void update_segment_mtime(struct f2fs_sb_info *sbi, block_t blkaddr,
						unsigned long long old_mtime)
{
	struct seg_entry *se;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned long long ctime = get_mtime(sbi, false);
	unsigned long long mtime = old_mtime ? old_mtime : ctime;

	if (segno == NULL_SEGNO)
		return;

	se = get_seg_entry(sbi, segno);

	if (!se->mtime)
		se->mtime = mtime;
	else
		se->mtime = div_u64(se->mtime * se->valid_blocks + mtime,
						se->valid_blocks + 1);

	if (ctime > SIT_I(sbi)->max_mtime)
		SIT_I(sbi)->max_mtime = ctime;
}
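/*
 * update_segment_mtime() above keeps se->mtime as a running average of
 * the modification times of the blocks in the segment:
 * avg' = (avg * valid_blocks + mtime) / (valid_blocks + 1).
 */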
static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;
	bool exist;
#ifdef CONFIG_F2FS_CHECK_FS
	bool mir_exist;
#endif

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks < 0 ||
			(new_vblocks > f2fs_usable_blks_in_seg(sbi, segno))));

	se->valid_blocks = new_vblocks;

	/* Update valid block bitmap */
	if (del > 0) {
		exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
		mir_exist = f2fs_test_and_set_bit(offset,
						se->cur_valid_map_mir);
		if (unlikely(exist != mir_exist)) {
			f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
				 blkaddr, exist);
			f2fs_bug_on(sbi, 1);
		}
#endif
		if (unlikely(exist)) {
			f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
				 blkaddr);
			f2fs_bug_on(sbi, 1);
			se->valid_blocks--;
			del = 0;
		}

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;

		/*
		 * SSR should never reuse block which is checkpointed
		 * or newly invalidated.
		 */
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
			if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
				se->ckpt_valid_blocks++;
		}
	} else {
		exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
#ifdef CONFIG_F2FS_CHECK_FS
		mir_exist = f2fs_test_and_clear_bit(offset,
						se->cur_valid_map_mir);
		if (unlikely(exist != mir_exist)) {
			f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
				 blkaddr, exist);
			f2fs_bug_on(sbi, 1);
		}
#endif
		if (unlikely(!exist)) {
			f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
				 blkaddr);
			f2fs_bug_on(sbi, 1);
			se->valid_blocks++;
			del = 0;
		} else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
			/*
			 * If checkpoints are off, we must not reuse data that
			 * was used in the previous checkpoint. If it was used
			 * before, we must track that to know how much space we
			 * really have.
			 */
			if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
				spin_lock(&sbi->stat_lock);
				sbi->unusable_block_count++;
				spin_unlock(&sbi->stat_lock);
			}
		}

		if (f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (__is_large_section(sbi))
		get_sec_entry(sbi, segno)->valid_blocks += del;
}
void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
		return;

	invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);

	/* add it into sit main buffer */
	down_write(&sit_i->sentry_lock);

	update_segment_mtime(sbi, addr, 0);
	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	up_write(&sit_i->sentry_lock);
}

bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno, offset;
	struct seg_entry *se;
	bool is_cp = false;

	if (!__is_valid_data_blkaddr(blkaddr))
		return true;

	down_read(&sit_i->sentry_lock);

	segno = GET_SEGNO(sbi, blkaddr);
	se = get_seg_entry(sbi, segno);
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	if (f2fs_test_bit(offset, se->ckpt_valid_map))
		is_cp = true;

	up_read(&sit_i->sentry_lock);

	return is_cp;
}
/*
 * This function must be called with curseg->curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;

	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}
/*
 * Caller should put this summary page
 */
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	if (unlikely(f2fs_cp_error(sbi)))
		return ERR_PTR(-EIO);
	return f2fs_get_meta_page_retry(sbi, GET_SUM_BLOCK(sbi, segno));
}

void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
					void *src, block_t blk_addr)
{
	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);

	memcpy(page_address(page), src, PAGE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
}

static void write_current_sum_page(struct f2fs_sb_info *sbi,
						int type, block_t blk_addr)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
	struct f2fs_summary_block *src = curseg->sum_blk;
	struct f2fs_summary_block *dst;

	dst = (struct f2fs_summary_block *)page_address(page);
	memset(dst, 0, PAGE_SIZE);

	mutex_lock(&curseg->curseg_mutex);

	down_read(&curseg->journal_rwsem);
	memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
	up_read(&curseg->journal_rwsem);

	memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
	memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);

	mutex_unlock(&curseg->curseg_mutex);

	set_page_dirty(page);
	f2fs_put_page(page, 1);
}
static int is_next_segment_free(struct f2fs_sb_info *sbi,
				struct curseg_info *curseg, int type)
{
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}
/*
 * Find a new segment from the free segments bitmap in the right order.
 * This function must succeed; failing to find a free segment is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
	unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
			GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
		if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	segno = GET_SEG_FROM_SEC(sbi, secno);
	zoneno = GET_ZONE_FROM_SEC(sbi, secno);

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}
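/*
 * Note: when secs_per_zone > 1 the search above also avoids handing out a
 * zone that another current segment is already writing to, retrying from a
 * shifted hint instead, so concurrent logs spread across zones.  With the
 * common secs_per_zone == 1 the zone checks fall through immediately.
 */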
static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;
	unsigned short seg_type = curseg->seg_type;

	curseg->inited = true;
	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));

	sanity_check_seg_type(sbi, seg_type);

	if (IS_DATASEG(seg_type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(seg_type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, seg_type, curseg->segno, modified);
}
static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned short seg_type = curseg->seg_type;

	sanity_check_seg_type(sbi, seg_type);

	/* if segs_per_sec is larger than 1, we need to keep the original policy */
	if (__is_large_section(sbi))
		return curseg->segno;

	/* inmem log may not locate on any segment after mount */
	if (!curseg->inited)
		return 0;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	if (test_opt(sbi, NOHEAP) &&
		(seg_type == CURSEG_HOT_DATA || IS_NODESEG(seg_type)))
		return 0;

	if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
		return SIT_I(sbi)->last_victim[ALLOC_NEXT];

	/* find segments from 0 to reuse freed segments */
	if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
		return 0;

	return curseg->segno;
}
/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned short seg_type = curseg->seg_type;
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	if (curseg->inited)
		write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (seg_type == CURSEG_WARM_DATA || seg_type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	segno = __get_next_segno(sbi, type);
	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}
static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}
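/*
 * Example (a sketch): with cur_valid_map = 1010 0000 and
 * ckpt_valid_map = 1100 0000 for the first byte of a segment, the OR is
 * 1110 0000, so __find_rev_next_zero_bit() starting from offset 0 returns
 * 3 -- the first block that is free both now and in the last checkpoint,
 * and therefore safe to reuse for an SSR write.
 */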
/*
 * If a segment is written in LFS manner, the next block offset is simply the
 * current block offset plus one. If it is written in SSR manner, the next
 * offset is found by calling __next_free_blkoff().
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}
/*
 * This function always allocates a used segment (from the dirty seglist) in
 * SSR manner, so it should recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool flush)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	if (flush)
		write_sum_page(sbi, curseg->sum_blk,
					GET_SUM_BLOCK(sbi, curseg->segno));

	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	sum_page = f2fs_get_sum_page(sbi, new_segno);
	if (IS_ERR(sum_page)) {
		/* GC won't be able to use stale summary pages by cp_error */
		memset(curseg->sum_blk, 0, SUM_ENTRY_SIZE);
		return;
	}
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
	f2fs_put_page(sum_page, 1);
}
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
				int alloc_mode, unsigned long long age);

static void get_atssr_segment(struct f2fs_sb_info *sbi, int type,
					int target_type, int alloc_mode,
					unsigned long long age)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	curseg->seg_type = target_type;

	if (get_ssr_segment(sbi, type, alloc_mode, age)) {
		struct seg_entry *se = get_seg_entry(sbi, curseg->next_segno);

		curseg->seg_type = se->type;
		change_curseg(sbi, type, true);
	} else {
		/* allocate cold segment by default */
		curseg->seg_type = CURSEG_COLD_DATA;
		new_curseg(sbi, type, true);
	}
	stat_inc_seg_type(sbi, curseg);
}
static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_ALL_DATA_ATGC);

	if (!sbi->am.atgc_enabled)
		return;

	down_read(&SM_I(sbi)->curseg_lock);

	mutex_lock(&curseg->curseg_mutex);
	down_write(&SIT_I(sbi)->sentry_lock);

	get_atssr_segment(sbi, CURSEG_ALL_DATA_ATGC, CURSEG_COLD_DATA, SSR, 0);

	up_write(&SIT_I(sbi)->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);

	up_read(&SM_I(sbi)->curseg_lock);
}

void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi)
{
	__f2fs_init_atgc_curseg(sbi);
}
static void __f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	if (!curseg->inited)
		goto out;

	if (get_valid_blocks(sbi, curseg->segno, false)) {
		write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	} else {
		mutex_lock(&DIRTY_I(sbi)->seglist_lock);
		__set_test_and_free(sbi, curseg->segno, true);
		mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
	}
out:
	mutex_unlock(&curseg->curseg_mutex);
}

void f2fs_save_inmem_curseg(struct f2fs_sb_info *sbi)
{
	__f2fs_save_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);

	if (sbi->am.atgc_enabled)
		__f2fs_save_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
}
static void __f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	if (!curseg->inited)
		goto out;
	if (get_valid_blocks(sbi, curseg->segno, false))
		goto out;

	mutex_lock(&DIRTY_I(sbi)->seglist_lock);
	__set_test_and_inuse(sbi, curseg->segno);
	mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
out:
	mutex_unlock(&curseg->curseg_mutex);
}

void f2fs_restore_inmem_curseg(struct f2fs_sb_info *sbi)
{
	__f2fs_restore_inmem_curseg(sbi, CURSEG_COLD_DATA_PINNED);

	if (sbi->am.atgc_enabled)
		__f2fs_restore_inmem_curseg(sbi, CURSEG_ALL_DATA_ATGC);
}
static int get_ssr_segment(struct f2fs_sb_info *sbi, int type,
				int alloc_mode, unsigned long long age)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
	unsigned segno = NULL_SEGNO;
	unsigned short seg_type = curseg->seg_type;
	int i, cnt;
	bool reversed = false;

	sanity_check_seg_type(sbi, seg_type);

	/* f2fs_need_SSR() already forces to do this */
	if (!v_ops->get_victim(sbi, &segno, BG_GC, seg_type, alloc_mode, age)) {
		curseg->next_segno = segno;
		return 1;
	}

	/* For node segments, let's do SSR more intensively */
	if (IS_NODESEG(seg_type)) {
		if (seg_type >= CURSEG_WARM_NODE) {
			reversed = true;
			i = CURSEG_COLD_NODE;
		} else {
			i = CURSEG_HOT_NODE;
		}
		cnt = NR_CURSEG_NODE_TYPE;
	} else {
		if (seg_type >= CURSEG_WARM_DATA) {
			reversed = true;
			i = CURSEG_COLD_DATA;
		} else {
			i = CURSEG_HOT_DATA;
		}
		cnt = NR_CURSEG_DATA_TYPE;
	}

	for (; cnt-- > 0; reversed ? i-- : i++) {
		if (i == seg_type)
			continue;
		if (!v_ops->get_victim(sbi, &segno, BG_GC, i, alloc_mode, age)) {
			curseg->next_segno = segno;
			return 1;
		}
	}

	/* find valid_blocks=0 in dirty list */
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
		segno = get_free_segment(sbi);
		if (segno != NULL_SEGNO) {
			curseg->next_segno = segno;
			return 1;
		}
	}
	return 0;
}
/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; failure to allocate is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
					curseg->seg_type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS &&
			is_next_segment_free(sbi, curseg, type) &&
			likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		new_curseg(sbi, type, false);
	else if (f2fs_need_SSR(sbi) &&
			get_ssr_segment(sbi, type, SSR, 0))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}
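/*
 * Decision ladder above, in order: a forced call always opens a fresh
 * section; the warm node log takes a new segment while CRC recovery is not
 * in effect, so roll-forward recovery can follow a contiguous node chain;
 * an LFS log whose in-section neighbour is free simply advances to it;
 * otherwise SSR reuse is tried once f2fs_need_SSR() reports free sections
 * are scarce, and a plain new segment is the last resort.
 */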
void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
					unsigned int start, unsigned int end)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno;

	down_read(&SM_I(sbi)->curseg_lock);
	mutex_lock(&curseg->curseg_mutex);
	down_write(&SIT_I(sbi)->sentry_lock);

	segno = CURSEG_I(sbi, type)->segno;
	if (segno < start || segno > end)
		goto unlock;

	if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type, SSR, 0))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, true);

	stat_inc_seg_type(sbi, curseg);

	locate_dirty_segment(sbi, segno);
unlock:
	up_write(&SIT_I(sbi)->sentry_lock);

	if (segno != curseg->segno)
		f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
			    type, segno, curseg->segno);

	mutex_unlock(&curseg->curseg_mutex);
	up_read(&SM_I(sbi)->curseg_lock);
}
static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int old_segno;

	if (!curseg->inited)
		goto alloc;

	if (!curseg->next_blkoff &&
		!get_valid_blocks(sbi, curseg->segno, false) &&
		!get_ckpt_valid_blocks(sbi, curseg->segno))
		return;

alloc:
	old_segno = curseg->segno;
	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
	locate_dirty_segment(sbi, old_segno);
}

void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type)
{
	down_write(&SIT_I(sbi)->sentry_lock);
	__allocate_new_segment(sbi, type);
	up_write(&SIT_I(sbi)->sentry_lock);
}

void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
{
	int i;

	down_write(&SIT_I(sbi)->sentry_lock);
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
		__allocate_new_segment(sbi, i);
	up_write(&SIT_I(sbi)->sentry_lock);
}
static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
						struct cp_control *cpc)
{
	__u64 trim_start = cpc->trim_start;
	bool has_candidate = false;

	down_write(&SIT_I(sbi)->sentry_lock);
	for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
		if (add_discard_addrs(sbi, cpc, true)) {
			has_candidate = true;
			break;
		}
	}
	up_write(&SIT_I(sbi)->sentry_lock);

	cpc->trim_start = trim_start;
	return has_candidate;
}
static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy,
					unsigned int start, unsigned int end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	int issued;
	unsigned int trimmed = 0;

next:
	issued = 0;

	mutex_lock(&dcc->cmd_lock);
	if (unlikely(dcc->rbtree_check))
		f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
							&dcc->root, false));

	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, start,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc && dc->lstart <= end) {
		struct rb_node *node;
		int err = 0;

		if (dc->len < dpolicy->granularity)
			goto skip;

		if (dc->state != D_PREP) {
			list_move_tail(&dc->list, &dcc->fstrim_list);
			goto skip;
		}

		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);

		if (issued >= dpolicy->max_requests) {
			start = dc->lstart + dc->len;

			if (err)
				__remove_discard_cmd(sbi, dc);

			blk_finish_plug(&plug);
			mutex_unlock(&dcc->cmd_lock);
			trimmed += __wait_all_discard_cmd(sbi, NULL);
			congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
			goto next;
		}
skip:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);

		if (fatal_signal_pending(current))
			break;
	}

	blk_finish_plug(&plug);
	mutex_unlock(&dcc->cmd_lock);

	return trimmed;
}
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	block_t start_block, end_block;
	struct cp_control cpc;
	struct discard_policy dpolicy;
	unsigned long long trimmed = 0;
	int err = 0;
	bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);

	if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
		return -EINVAL;

	if (end < MAIN_BLKADDR(sbi))
		goto out;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
		f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
		return -EFSCORRUPTED;
	}

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	if (need_align) {
		start_segno = rounddown(start_segno, sbi->segs_per_sec);
		end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
	}

	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
	cpc.trim_start = start_segno;
	cpc.trim_end = end_segno;

	if (sbi->discard_blks == 0)
		goto out;

	down_write(&sbi->gc_lock);
	err = f2fs_write_checkpoint(sbi, &cpc);
	up_write(&sbi->gc_lock);
	if (err)
		goto out;

	/*
	 * We filed discard candidates, but we don't actually need to wait for
	 * all of them, since they'll be issued in idle time along with the
	 * runtime discard option. Users typically rely on either runtime
	 * discard or periodic fstrim, not both.
	 */
	if (f2fs_realtime_discard_enable(sbi))
		goto out;

	start_block = START_BLOCK(sbi, start_segno);
	end_block = START_BLOCK(sbi, end_segno + 1);

	__init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
	trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
					start_block, end_block);

	trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
					start_block, end_block);
out:
	if (!err)
		range->len = F2FS_BLK_TO_BYTES(trimmed);
	return err;
}
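/*
 * Worked example (a sketch, assuming default 2MB segments of 512 4KB
 * blocks): trimming bytes [0, 62MB) covers blocks [0, 15872), i.e.
 * segments 0..30 of the main area.  With a large section of 4
 * segs_per_sec in LFS mode, need_align rounds this out to whole
 * sections, so segments 0..31 are checkpointed with CP_DISCARD and
 * their invalid blocks become discard candidates.
 */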
static bool __has_curseg_space(struct f2fs_sb_info *sbi,
					struct curseg_info *curseg)
{
	return curseg->next_blkoff < f2fs_usable_blks_in_seg(sbi,
							curseg->segno);
}

int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
{
	switch (hint) {
	case WRITE_LIFE_SHORT:
		return CURSEG_HOT_DATA;
	case WRITE_LIFE_EXTREME:
		return CURSEG_COLD_DATA;
	default:
		return CURSEG_WARM_DATA;
	}
}
/* This returns write hints for each segment type. These hints will be
 * passed down to the block layer. There are mapping tables which depend on
 * the mount option 'whint_mode'.
 *
 * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
 *
 * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
 *
 * User                  F2FS                     Block
 * ----                  ----                     -----
 *                       META                     WRITE_LIFE_NOT_SET
 *                       HOT_NODE                 "
 *                       WARM_NODE                "
 *                       COLD_NODE                "
 * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
 * extension list        "                        "
 *
 * -- buffered io
 * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
 * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
 * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
 * WRITE_LIFE_NONE       "                        "
 * WRITE_LIFE_MEDIUM     "                        "
 * WRITE_LIFE_LONG       "                        "
 *
 * -- direct io
 * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
 * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
 * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
 * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
 * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
 * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
 *
 * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
 *
 * User                  F2FS                     Block
 * ----                  ----                     -----
 *                       META                     WRITE_LIFE_MEDIUM;
 *                       HOT_NODE                 WRITE_LIFE_NOT_SET
 *                       WARM_NODE                "
 *                       COLD_NODE                WRITE_LIFE_NONE
 * ioctl(COLD)           COLD_DATA                WRITE_LIFE_EXTREME
 * extension list        "                        "
 *
 * -- buffered io
 * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
 * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
 * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_LONG
 * WRITE_LIFE_NONE       "                        "
 * WRITE_LIFE_MEDIUM     "                        "
 * WRITE_LIFE_LONG       "                        "
 *
 * -- direct io
 * WRITE_LIFE_EXTREME    COLD_DATA                WRITE_LIFE_EXTREME
 * WRITE_LIFE_SHORT      HOT_DATA                 WRITE_LIFE_SHORT
 * WRITE_LIFE_NOT_SET    WARM_DATA                WRITE_LIFE_NOT_SET
 * WRITE_LIFE_NONE       "                        WRITE_LIFE_NONE
 * WRITE_LIFE_MEDIUM     "                        WRITE_LIFE_MEDIUM
 * WRITE_LIFE_LONG       "                        WRITE_LIFE_LONG
 */
enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
				enum page_type type, enum temp_type temp)
{
	if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
		if (type == DATA) {
			if (temp == WARM)
				return WRITE_LIFE_NOT_SET;
			else if (temp == HOT)
				return WRITE_LIFE_SHORT;
			else if (temp == COLD)
				return WRITE_LIFE_EXTREME;
		} else {
			return WRITE_LIFE_NOT_SET;
		}
	} else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
		if (type == DATA) {
			if (temp == WARM)
				return WRITE_LIFE_LONG;
			else if (temp == HOT)
				return WRITE_LIFE_SHORT;
			else if (temp == COLD)
				return WRITE_LIFE_EXTREME;
		} else if (type == NODE) {
			if (temp == WARM || temp == HOT)
				return WRITE_LIFE_NOT_SET;
			else if (temp == COLD)
				return WRITE_LIFE_NONE;
		} else if (type == META) {
			return WRITE_LIFE_MEDIUM;
		}
	}
	return WRITE_LIFE_NOT_SET;
}
static int __get_segment_type_2(struct f2fs_io_info *fio)
{
	if (fio->type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct f2fs_io_info *fio)
{
	if (fio->type == DATA) {
		struct inode *inode = fio->page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(fio->page) && is_cold_node(fio->page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}
static int __get_segment_type_6(struct f2fs_io_info *fio)
{
	if (fio->type == DATA) {
		struct inode *inode = fio->page->mapping->host;

		if (is_cold_data(fio->page)) {
			if (fio->sbi->am.atgc_enabled)
				return CURSEG_ALL_DATA_ATGC;
			else
				return CURSEG_COLD_DATA;
		}
		if (file_is_cold(inode) || f2fs_need_compress_data(inode))
			return CURSEG_COLD_DATA;
		if (file_is_hot(inode) ||
				is_inode_flag_set(inode, FI_HOT_DATA) ||
				f2fs_is_atomic_file(inode) ||
				f2fs_is_volatile_file(inode))
			return CURSEG_HOT_DATA;
		return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
	} else {
		if (IS_DNODE(fio->page))
			return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		return CURSEG_COLD_NODE;
	}
}
static int __get_segment_type(struct f2fs_io_info *fio)
{
	int type = 0;

	switch (F2FS_OPTION(fio->sbi).active_logs) {
	case 2:
		type = __get_segment_type_2(fio);
		break;
	case 4:
		type = __get_segment_type_4(fio);
		break;
	case 6:
		type = __get_segment_type_6(fio);
		break;
	default:
		f2fs_bug_on(fio->sbi, true);
	}

	if (IS_HOT(type))
		fio->temp = HOT;
	else if (IS_WARM(type))
		fio->temp = WARM;
	else
		fio->temp = COLD;
	return type;
}
void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type,
		struct f2fs_io_info *fio)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned long long old_mtime;
	bool from_gc = (type == CURSEG_ALL_DATA_ATGC);
	struct seg_entry *se = NULL;

	down_read(&SM_I(sbi)->curseg_lock);

	mutex_lock(&curseg->curseg_mutex);
	down_write(&sit_i->sentry_lock);

	if (from_gc) {
		f2fs_bug_on(sbi, GET_SEGNO(sbi, old_blkaddr) == NULL_SEGNO);
		se = get_seg_entry(sbi, GET_SEGNO(sbi, old_blkaddr));
		sanity_check_seg_type(sbi, se->type);
		f2fs_bug_on(sbi, IS_NODESEG(se->type));
	}
	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	f2fs_bug_on(sbi, curseg->next_blkoff >= sbi->blocks_per_seg);

	f2fs_wait_discard_bio(sbi, *new_blkaddr);

	/*
	 * __add_sum_entry should be called under the curseg_mutex
	 * because it updates a summary entry in the
	 * current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (from_gc) {
		old_mtime = get_segment_mtime(sbi, old_blkaddr);
	} else {
		update_segment_mtime(sbi, old_blkaddr, 0);
		old_mtime = 0;
	}
	update_segment_mtime(sbi, *new_blkaddr, old_mtime);

	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	update_sit_entry(sbi, *new_blkaddr, 1);
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
		update_sit_entry(sbi, old_blkaddr, -1);

	if (!__has_curseg_space(sbi, curseg)) {
		if (from_gc)
			get_atssr_segment(sbi, type, se->type,
						AT_SSR, se->mtime);
		else
			sit_i->s_ops->allocate_segment(sbi, type, false);
	}
	/*
	 * segment dirty status should be updated after segment allocation,
	 * so we just need to update status only one time after previous
	 * segment being closed.
	 */
	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));

	up_write(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type)) {
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

		f2fs_inode_chksum_set(sbi, page);
	}

	if (F2FS_IO_ALIGNED(sbi))
		fio->retry = false;

	if (fio) {
		struct f2fs_bio_info *io;

		INIT_LIST_HEAD(&fio->list);
		fio->in_list = true;
		io = sbi->write_io[fio->type] + fio->temp;
		spin_lock(&io->io_lock);
		list_add_tail(&fio->list, &io->io_list);
		spin_unlock(&io->io_lock);
	}

	mutex_unlock(&curseg->curseg_mutex);

	up_read(&SM_I(sbi)->curseg_lock);
}
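/*
 * Note on lock ordering in the allocator above: curseg_lock (read) is
 * taken first, then the per-log curseg_mutex, then sentry_lock (write).
 * f2fs_do_replace_block() takes curseg_lock for writing, so block
 * allocation and block replacement exclude each other while normal
 * allocations on different logs can still run concurrently.
 */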
static void update_device_state(struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int devidx;

	if (!f2fs_is_multi_device(sbi))
		return;

	devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);

	/* update device state for fsync */
	f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);

	/* update device state for checkpoint */
	if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
		spin_lock(&sbi->dev_lock);
		f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}
}
static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(fio);
	bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);

	if (keep_order)
		down_read(&fio->sbi->io_order_lock);
reallocate:
	f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
			&fio->new_blkaddr, sum, type, fio);
	if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
		invalidate_mapping_pages(META_MAPPING(fio->sbi),
					fio->old_blkaddr, fio->old_blkaddr);

	/* writeout dirty page into bdev */
	f2fs_submit_page_write(fio);
	if (fio->retry) {
		fio->old_blkaddr = fio->new_blkaddr;
		goto reallocate;
	}

	update_device_state(fio);

	if (keep_order)
		up_read(&fio->sbi->io_order_lock);
}
void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
					enum iostat_type io_type)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.temp = HOT,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
		.old_blkaddr = page->index,
		.new_blkaddr = page->index,
		.page = page,
		.encrypted_page = NULL,
		.in_list = false,
	};

	if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
		fio.op_flags &= ~REQ_META;

	set_page_writeback(page);
	ClearPageError(page);
	f2fs_submit_page_write(&fio);

	stat_inc_meta_count(sbi, page->index);
	f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
}

void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;

	set_summary(&sum, nid, 0, 0);
	do_write_page(&sum, fio);

	f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
}
void f2fs_outplace_write_data(struct dnode_of_data *dn,
					struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct f2fs_summary sum;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
	do_write_page(&sum, fio);
	f2fs_update_data_blkaddr(dn, fio->new_blkaddr);

	f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
}
int f2fs_inplace_write_data(struct f2fs_io_info *fio)
{
	int err;
	struct f2fs_sb_info *sbi = fio->sbi;
	unsigned int segno;

	fio->new_blkaddr = fio->old_blkaddr;
	/* i/o temperature is needed for passing down write hints */
	__get_segment_type(fio);

	segno = GET_SEGNO(sbi, fio->new_blkaddr);

	if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
			  __func__, segno);
		return -EFSCORRUPTED;
	}

	stat_inc_inplace_blocks(fio->sbi);

	if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
		err = f2fs_merge_page_bio(fio);
	else
		err = f2fs_submit_page_bio(fio);
	if (!err) {
		update_device_state(fio);
		f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
	}

	return err;
}
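/*
 * Note: in-place update (IPU) reuses the old block address, so no SIT or
 * summary update is needed here -- only the block-layer submission.  That
 * is why this path bypasses do_write_page() entirely; the out-of-place
 * path above is the one that moves the block and dirties segment state.
 */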
static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
						unsigned int segno)
{
	int i;

	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
		if (CURSEG_I(sbi, i)->segno == segno)
			break;
	}
	return i;
}
void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
				block_t old_blkaddr, block_t new_blkaddr,
				bool recover_curseg, bool recover_newaddr,
				bool from_gc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;
	unsigned short old_blkoff;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	down_write(&SM_I(sbi)->curseg_lock);

	if (!recover_curseg) {
		/* for recovery flow */
		if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
			if (old_blkaddr == NULL_ADDR)
				type = CURSEG_COLD_DATA;
			else
				type = CURSEG_WARM_DATA;
		}
	} else {
		if (IS_CURSEG(sbi, segno)) {
			/* se->type is volatile as SSR allocation */
			type = __f2fs_get_curseg(sbi, segno);
			f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
		} else {
			type = CURSEG_WARM_DATA;
		}
	}

	f2fs_bug_on(sbi, !IS_DATASEG(type));
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	down_write(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;
	old_blkoff = curseg->next_blkoff;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	if (!recover_curseg || recover_newaddr) {
		if (!from_gc)
			update_segment_mtime(sbi, new_blkaddr, 0);
		update_sit_entry(sbi, new_blkaddr, 1);
	}
	if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
		invalidate_mapping_pages(META_MAPPING(sbi),
					old_blkaddr, old_blkaddr);
		if (!from_gc)
			update_segment_mtime(sbi, old_blkaddr, 0);
		update_sit_entry(sbi, old_blkaddr, -1);
	}

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));

	locate_dirty_segment(sbi, old_cursegno);

	if (recover_curseg) {
		if (old_cursegno != curseg->segno) {
			curseg->next_segno = old_cursegno;
			change_curseg(sbi, type, true);
		}
		curseg->next_blkoff = old_blkoff;
	}

	up_write(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
	up_write(&SM_I(sbi)->curseg_lock);
}
void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
				block_t old_addr, block_t new_addr,
				unsigned char version, bool recover_curseg,
				bool recover_newaddr)
{
	struct f2fs_summary sum;

	set_summary(&sum, dn->nid, dn->ofs_in_node, version);

	f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
					recover_curseg, recover_newaddr, false);

	f2fs_update_data_blkaddr(dn, new_addr);
}
void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type, bool ordered, bool locked)
{
	if (PageWriteback(page)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);

		/* submit cached LFS IO */
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
		/* submit cached IPU IO */
		f2fs_submit_merged_ipu_write(sbi, NULL, page);
		if (ordered) {
			wait_on_page_writeback(page);
			f2fs_bug_on(sbi, locked && PageWriteback(page));
		} else {
			wait_for_stable_page(page);
		}
	}
}

void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *cpage;

	if (!f2fs_post_read_required(inode))
		return;

	if (!__is_valid_data_blkaddr(blkaddr))
		return;

	cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
	if (cpage) {
		f2fs_wait_on_page_writeback(cpage, DATA, true, true);
		f2fs_put_page(cpage, 1);
	}
}

void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
								block_t len)
{
	int i;

	for (i = 0; i < len; i++)
		f2fs_wait_on_block_writeback(inode, blkaddr + i);
}
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = f2fs_get_meta_page(sbi, start++);
	if (IS_ERR(page))
		return PTR_ERR(page);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;

			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);

			page = f2fs_get_meta_page(sbi, start++);
			if (IS_ERR(page))
				return PTR_ERR(page);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}
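/*
 * Layout sketch of the compacted summary area read above:
 * [ NAT journal | SIT journal | hot/warm/cold data summaries ... ]
 * The two journals always occupy the head of the first page, and the
 * 7-byte summary entries are then packed back to back across as many
 * pages as f2fs_npages_for_summary_flush() predicted (at most three).
 */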
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;
	int err = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = f2fs_get_meta_page(sbi, blk_addr);
	if (IS_ERR(new))
		return PTR_ERR(new);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (__exist_node_summaries(sbi)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;

			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			err = f2fs_restore_node_summary(sbi, segno, sum);
			if (err)
				goto out;
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);

	/* update journal info */
	down_write(&curseg->journal_rwsem);
	memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
	up_write(&curseg->journal_rwsem);

	memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
	memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
out:
	f2fs_put_page(new, 1);
	return err;
}
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
	struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
		int npages = f2fs_npages_for_summary_flush(sbi, true);

		if (npages >= 2)
			f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
							META_CP, true);

		/* restore for compacted data summary */
		err = read_compacted_summaries(sbi);
		if (err)
			return err;
		type = CURSEG_HOT_NODE;
	}

	if (__exist_node_summaries(sbi))
		f2fs_ra_meta_pages(sbi,
				sum_blk_addr(sbi, NR_CURSEG_PERSIST_TYPE, type),
				NR_CURSEG_PERSIST_TYPE - type, META_CP, true);

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}

	/* sanity check for summary blocks */
	if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
			sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
		f2fs_err(sbi, "invalid journal entries nats %u sits %u\n",
			 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
		return -EINVAL;
	}

	return 0;
}
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = f2fs_grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);
	memset(kaddr, 0, PAGE_SIZE);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;

		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = f2fs_grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				memset(kaddr, 0, PAGE_SIZE);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}
static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;

	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++)
		write_current_sum_page(sbi, i, blkaddr + (i - type));
}

void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}
int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(journal); i++) {
			if (le32_to_cpu(nid_in_journal(journal, i)) == val)
				return i;
		}
		if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
			return update_nats_in_cursum(journal, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(journal); i++)
			if (le32_to_cpu(segno_in_journal(journal, i)) == val)
				return i;
		if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
			return update_sits_in_cursum(journal, 1);
	}
	return -1;
}
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *page;
	pgoff_t src_off, dst_off;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	page = f2fs_grab_meta_page(sbi, dst_off);
	seg_info_to_sit_page(sbi, page, start);

	set_page_dirty(page);
	set_to_next_sit(sit_i, start);

	return page;
}

static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}
static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&ses->set_list, &next->set_list);
}

static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}
static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}

static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(journal, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	update_sits_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}
/*
 * CP calls this function, which flushes SIT entries including sit_journal,
 * and moves prefree segs to free segs.
 */
void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct sit_entry_set *ses, *tmp;
	struct list_head *head = &SM_I(sbi)->sit_entry_set;
	bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
	struct seg_entry *se;

	down_write(&sit_i->sentry_lock);

	if (!sit_i->dirty_sentries)
		goto out;

	/*
	 * add and account sit entries of dirty bitmap in sit entry
	 * set temporarily
	 */
	add_sits_in_set(sbi);

	/*
	 * if there is not enough space in the journal to store dirty sit
	 * entries, remove all entries from the journal and add and account
	 * them in the sit entry set.
	 */
	if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
								!to_journal)
		remove_sits_in_journal(sbi);

	/*
	 * there are two steps to flush sit entries:
	 * #1, flush sit entries to journal in current cold data summary block.
	 * #2, flush sit entries to sit page.
	 */
	list_for_each_entry_safe(ses, tmp, head, set_list) {
		struct page *page = NULL;
		struct f2fs_sit_block *raw_sit = NULL;
		unsigned int start_segno = ses->start_segno;
		unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
						(unsigned long)MAIN_SEGS(sbi));
		unsigned int segno = start_segno;

		if (to_journal &&
			!__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
			to_journal = false;

		if (to_journal) {
			down_write(&curseg->journal_rwsem);
		} else {
			page = get_next_sit_page(sbi, start_segno);
			raw_sit = page_address(page);
		}

		/* flush dirty sit entries in region of current sit set */
		for_each_set_bit_from(segno, bitmap, end) {
			int offset, sit_offset;

			se = get_seg_entry(sbi, segno);
#ifdef CONFIG_F2FS_CHECK_FS
			if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
						SIT_VBLOCK_MAP_SIZE))
				f2fs_bug_on(sbi, 1);
#endif

			/* add discard candidates */
			if (!(cpc->reason & CP_DISCARD)) {
				cpc->trim_start = segno;
				add_discard_addrs(sbi, cpc, false);
			}

			if (to_journal) {
				offset = f2fs_lookup_journal_in_cursum(journal,
							SIT_JOURNAL, segno, 1);
				f2fs_bug_on(sbi, offset < 0);
				segno_in_journal(journal, offset) =
							cpu_to_le32(segno);
				seg_info_to_raw_sit(se,
					&sit_in_journal(journal, offset));
				check_block_count(sbi, segno,
					&sit_in_journal(journal, offset));
			} else {
				sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
				seg_info_to_raw_sit(se,
						&raw_sit->entries[sit_offset]);
				check_block_count(sbi, segno,
						&raw_sit->entries[sit_offset]);
			}

			__clear_bit(segno, bitmap);
			sit_i->dirty_sentries--;
			ses->entry_cnt--;
		}

		if (to_journal)
			up_write(&curseg->journal_rwsem);
		else
			f2fs_put_page(page, 1);

		f2fs_bug_on(sbi, ses->entry_cnt);
		release_sit_entry_set(ses);
	}

	f2fs_bug_on(sbi, !list_empty(head));
	f2fs_bug_on(sbi, sit_i->dirty_sentries);
out:
	if (cpc->reason & CP_DISCARD) {
		__u64 trim_start = cpc->trim_start;

		for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
			add_discard_addrs(sbi, cpc, false);

		cpc->trim_start = trim_start;
	}
	up_write(&sit_i->sentry_lock);

	set_prefree_as_free_segments(sbi);
}
static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *bitmap;
	unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;

	/* allocate memory for SIT information */
	sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries =
		f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
					      MAIN_SEGS(sbi)),
			      GFP_KERNEL);
	if (!sit_i->sentries)
		return -ENOMEM;

	main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
								GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 4;
#else
	bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 3;
#endif
	sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!sit_i->bitmap)
		return -ENOMEM;

	bitmap = sit_i->bitmap;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;

		sit_i->sentries[start].ckpt_valid_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;

#ifdef CONFIG_F2FS_CHECK_FS
		sit_i->sentries[start].cur_valid_map_mir = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;
#endif

		sit_i->sentries[start].discard_map = bitmap;
		bitmap += SIT_VBLOCK_MAP_SIZE;
	}

	sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (__is_large_section(sbi)) {
		sit_i->sec_entries =
			f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
						      MAIN_SECS(sbi)),
				      GFP_KERNEL);
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap)
		return -ENOMEM;

#ifdef CONFIG_F2FS_CHECK_FS
	sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
					sit_bitmap_size, GFP_KERNEL);
	if (!sit_i->sit_bitmap_mir)
		return -ENOMEM;

	sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
					main_bitmap_size, GFP_KERNEL);
	if (!sit_i->invalid_segmap)
		return -ENOMEM;
#endif

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = 0;
	sit_i->bitmap_size = sit_bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = ktime_get_boottime_seconds();
	init_rwsem(&sit_i->sentry_lock);
	return 0;
}
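/*
 * Sizing sketch (assuming default 2MB segments, so SIT_VBLOCK_MAP_SIZE is
 * 64 bytes): a 512GB volume has about 256K main segments, and the three
 * per-segment validity/discard maps packed into sit_i->bitmap above cost
 * roughly 256K * 64 * 3 bytes = 48MB; a fourth mirror map is added under
 * CONFIG_F2FS_CHECK_FS.
 */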
static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* set all segments as dirty temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}
static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE,
					sizeof(*array)), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NO_CHECK_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		init_rwsem(&array[i].journal_rwsem);
		array[i].journal = f2fs_kzalloc(sbi,
				sizeof(struct f2fs_journal), GFP_KERNEL);
		if (!array[i].journal)
			return -ENOMEM;
		if (i < NR_PERSISTENT_LOG)
			array[i].seg_type = CURSEG_HOT_DATA + i;
		else if (i == CURSEG_COLD_DATA_PINNED)
			array[i].seg_type = CURSEG_COLD_DATA;
		else if (i == CURSEG_ALL_DATA_ATGC)
			array[i].seg_type = CURSEG_COLD_DATA;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
		array[i].inited = false;
	}
	return restore_curseg_summaries(sbi);
}
static int build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct seg_entry *se;
	struct f2fs_sit_entry sit;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
	int err = 0;
	block_t total_node_blocks = 0;

	do {
		readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_VECS,
							META_SIT, true);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct f2fs_sit_block *sit_blk;
			struct page *page;

			se = &sit_i->sentries[start];
			page = get_current_sit_page(sbi, start);
			if (IS_ERR(page))
				return PTR_ERR(page);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);

			err = check_block_count(sbi, start, &sit);
			if (err)
				return err;
			seg_info_from_raw_sit(se, &sit);
			if (IS_NODESEG(se->type))
				total_node_blocks += se->valid_blocks;

			/* build discard map only one time */
			if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
				memset(se->discard_map, 0xff,
					SIT_VBLOCK_MAP_SIZE);
			} else {
				memcpy(se->discard_map,
					se->cur_valid_map,
					SIT_VBLOCK_MAP_SIZE);
				sbi->discard_blks +=
					sbi->blocks_per_seg -
					se->valid_blocks;
			}

			if (__is_large_section(sbi))
				get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < sits_in_cursum(journal); i++) {
		unsigned int old_valid_blocks;

		start = le32_to_cpu(segno_in_journal(journal, i));
		if (start >= MAIN_SEGS(sbi)) {
			f2fs_err(sbi, "Wrong journal entry on segno %u",
				 start);
			err = -EFSCORRUPTED;
			break;
		}

		se = &sit_i->sentries[start];
		sit = sit_in_journal(journal, i);

		old_valid_blocks = se->valid_blocks;
		if (IS_NODESEG(se->type))
			total_node_blocks -= old_valid_blocks;

		err = check_block_count(sbi, start, &sit);
		if (err)
			break;
		seg_info_from_raw_sit(se, &sit);
		if (IS_NODESEG(se->type))
			total_node_blocks += se->valid_blocks;

		if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
			memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
		} else {
			memcpy(se->discard_map, se->cur_valid_map,
						SIT_VBLOCK_MAP_SIZE);
			sbi->discard_blks += old_valid_blocks;
			sbi->discard_blks -= se->valid_blocks;
		}

		if (__is_large_section(sbi)) {
			get_sec_entry(sbi, start)->valid_blocks +=
							se->valid_blocks;
			get_sec_entry(sbi, start)->valid_blocks -=
							old_valid_blocks;
		}
	}
	up_read(&curseg->journal_rwsem);

	if (!err && total_node_blocks != valid_node_count(sbi)) {
		f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
			 total_node_blocks, valid_node_count(sbi));
		err = -EFSCORRUPTED;
	}

	return err;
}
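/*
 * Note: entries cached in the cold-data journal are newer than the on-disk
 * SIT blocks, so the second loop above deliberately overrides whatever the
 * first loop loaded for those segments, adjusting the node block total in
 * the process so the final consistency check still holds.
 */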
static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;
	struct seg_entry *sentry;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		if (f2fs_usable_blks_in_seg(sbi, start) == 0)
			continue;
		sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
		else
			SIT_I(sbi)->written_valid_blocks +=
						sentry->valid_blocks;
	}

	/* set use the current segments */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);

		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}
static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0, secno;
	block_t valid_blocks, usable_blks_in_seg;
	block_t blks_per_sec = BLKS_PER_SEC(sbi);

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, false);
		usable_blks_in_seg = f2fs_usable_blks_in_seg(sbi, segno);
		if (valid_blocks == usable_blks_in_seg || !valid_blocks)
			continue;
		if (valid_blocks > usable_blks_in_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}

	if (!__is_large_section(sbi))
		return;

	mutex_lock(&dirty_i->seglist_lock);
	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		valid_blocks = get_valid_blocks(sbi, segno, true);
		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (!valid_blocks || valid_blocks == blks_per_sec)
			continue;
		if (IS_CURSEC(sbi, secno))
			continue;
		set_bit(secno, dirty_i->dirty_secmap);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

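/*
 * Example with assumed geometry (segs_per_sec = 4, blks_per_sec = 2048):
 * a section holding 0 or 2048 valid blocks is skipped as clean or full,
 * while any count in between sets its bit in dirty_secmap so
 * section-granularity GC can later pick it as a victim; sections owned
 * by a curseg are excluded.
 */
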
static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
								GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
								GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	if (__is_large_section(sbi)) {
		bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
		dirty_i->dirty_secmap = f2fs_kvzalloc(sbi,
						bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_secmap)
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

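/*
 * Sizing note: f2fs_bitmap_size() rounds one bit per segment (or
 * section) up to whole unsigned longs. For example, MAIN_SEGS(sbi) =
 * 1000 needs BITS_TO_LONGS(1000) = 16 longs on a 64-bit kernel, i.e.
 * 128 bytes per dirty_segmap bitmap.
 */
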
static int sanity_check_curseg(struct f2fs_sb_info *sbi)
{
	int i;

	/*
	 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
	 * In LFS curseg, all blkaddr after .next_blkoff should be unused.
	 */
	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
		unsigned int blkofs = curseg->next_blkoff;

		sanity_check_seg_type(sbi, curseg->seg_type);

		if (f2fs_test_bit(blkofs, se->cur_valid_map))
			goto out;

		if (curseg->alloc_type == SSR)
			continue;

		for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
			if (!f2fs_test_bit(blkofs, se->cur_valid_map))
				continue;
out:
			f2fs_err(sbi,
				 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
				 i, curseg->segno, curseg->alloc_type,
				 curseg->next_blkoff, blkofs);
			return -EFSCORRUPTED;
		}
	}
	return 0;
}

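/*
 * Illustration of the invariant above with an 8-bit slice of
 * cur_valid_map = 1111 0000: next_blkoff = 4 is consistent for an LFS
 * curseg (bit 4 and all later bits are clear); next_blkoff = 3 points at
 * a used block and fails the first test, and a map of 1111 0100 with
 * next_blkoff = 4 fails the LFS scan because a used block follows the
 * supposedly free tail.
 */
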
#ifdef CONFIG_BLK_DEV_ZONED

static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
				    struct f2fs_dev_info *fdev,
				    struct blk_zone *zone)
{
	unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
	block_t zone_block, wp_block, last_valid_block;
	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
	int i, s, b, ret;
	struct seg_entry *se;

	if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

	wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
	wp_segno = GET_SEGNO(sbi, wp_block);
	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
	zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
	zone_segno = GET_SEGNO(sbi, zone_block);
	zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);

	if (zone_segno >= MAIN_SEGS(sbi))
		return 0;

	/*
	 * Skip check of zones cursegs point to, since
	 * fix_curseg_write_pointer() checks them.
	 */
	for (i = 0; i < NO_CHECK_TYPE; i++)
		if (zone_secno == GET_SEC_FROM_SEG(sbi,
						CURSEG_I(sbi, i)->segno))
			return 0;

	/* Get the last valid block of the zone. */
	last_valid_block = zone_block - 1;
	for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
		segno = zone_segno + s;
		se = get_seg_entry(sbi, segno);
		for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
			if (f2fs_test_bit(b, se->cur_valid_map)) {
				last_valid_block = START_BLOCK(sbi, segno) + b;
				break;
			}
		if (last_valid_block >= zone_block)
			break;
	}

	/*
	 * If the last valid block is beyond the write pointer, report the
	 * inconsistency. This inconsistency does not cause write errors,
	 * because the zone will not be selected for write operation until
	 * it gets discarded. Just report it.
	 */
	if (last_valid_block >= wp_block) {
		f2fs_notice(sbi, "Valid block beyond write pointer: "
			    "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
			    GET_SEGNO(sbi, last_valid_block),
			    GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
			    wp_segno, wp_blkoff);
		return 0;
	}

	/*
	 * If there is no valid block in the zone and the write pointer is
	 * not at zone start, reset the write pointer.
	 */
	if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
		f2fs_notice(sbi,
			    "Zone without valid block has non-zero write "
			    "pointer. Reset the write pointer: wp[0x%x,0x%x]",
			    wp_segno, wp_blkoff);
		ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
					zone->len >> log_sectors_per_block);
		if (ret) {
			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
				 fdev->path, ret);
			return ret;
		}
	}

	return 0;
}

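/*
 * Worked example for the sector/block conversions above, assuming a 4KB
 * block size: sbi->log_blocksize = 12 and SECTOR_SHIFT = 9, so
 * log_sectors_per_block = 3 (eight 512-byte sectors per block) and a
 * zone write pointer at sector 2048 maps to block
 * fdev->start_blk + (2048 >> 3) = fdev->start_blk + 256.
 */
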
static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
						block_t zone_blkaddr)
{
	int i;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!bdev_is_zoned(FDEV(i).bdev))
			continue;
		if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
				zone_blkaddr <= FDEV(i).end_blk))
			return &FDEV(i);
	}

	return NULL;
}

static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	memcpy(data, zone, sizeof(struct blk_zone));
	return 0;
}

static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *cs = CURSEG_I(sbi, type);
	struct f2fs_dev_info *zbd;
	struct blk_zone zone;
	unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
	block_t cs_zone_block, wp_block;
	unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
	sector_t zone_sector;
	int err;

	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));

	zbd = get_target_zoned_dev(sbi, cs_zone_block);
	if (!zbd)
		return 0;

	/* report zone for the sector the curseg points to */
	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
						<< log_sectors_per_block;
	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
				  report_one_zone_cb, &zone);
	if (err != 1) {
		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
			 zbd->path, err);
		return err;
	}

	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

	wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
	wp_segno = GET_SEGNO(sbi, wp_block);
	wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
	wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);

	if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
		wp_sector_off == 0)
		return 0;

	f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
		    "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
		    type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);

	f2fs_notice(sbi, "Assign new section to curseg[%d]: "
		    "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
	allocate_segment_by_default(sbi, type, true);

	/* check consistency of the zone curseg points to */
	if (check_zone_write_pointer(sbi, zbd, &zone))
		return -EIO;

	/* check the newly assigned zone */
	cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
	cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));

	zbd = get_target_zoned_dev(sbi, cs_zone_block);
	if (!zbd)
		return 0;

	zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
						<< log_sectors_per_block;
	err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
				  report_one_zone_cb, &zone);
	if (err != 1) {
		f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
			 zbd->path, err);
		return err;
	}

	if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
		return 0;

	if (zone.wp != zone.start) {
		f2fs_notice(sbi,
			    "New zone for curseg[%d] is not yet discarded. "
			    "Reset the zone: curseg[0x%x,0x%x]",
			    type, cs->segno, cs->next_blkoff);
		err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
				zone_sector >> log_sectors_per_block,
				zone.len >> log_sectors_per_block);
		if (err) {
			f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
				 zbd->path, err);
			return err;
		}
	}

	return 0;
}

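/*
 * Note on the alignment test above, again assuming a 4KB block size:
 * GENMASK(2, 0) = 0x7, so wp_sector_off is the write pointer's sector
 * offset within its block. The curseg is considered aligned only when it
 * names the same segment and block offset as the device write pointer
 * and that pointer sits on a block boundary (wp_sector_off == 0);
 * anything else forces allocation of a fresh section.
 */
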
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;

	for (i = 0; i < NR_PERSISTENT_LOG; i++) {
		ret = fix_curseg_write_pointer(sbi, i);
		if (ret)
			return ret;
	}

	return 0;
}

struct check_zone_write_pointer_args {
	struct f2fs_sb_info *sbi;
	struct f2fs_dev_info *fdev;
};

static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
					void *data)
{
	struct check_zone_write_pointer_args *args;

	args = (struct check_zone_write_pointer_args *)data;

	return check_zone_write_pointer(args->sbi, args->fdev, zone);
}

int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
{
	int i, ret;
	struct check_zone_write_pointer_args args;

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!bdev_is_zoned(FDEV(i).bdev))
			continue;

		args.sbi = sbi;
		args.fdev = &FDEV(i);
		ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
					check_zone_write_pointer_cb, &args);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static bool is_conv_zone(struct f2fs_sb_info *sbi, unsigned int zone_idx,
					unsigned int dev_idx)
{
	if (!bdev_is_zoned(FDEV(dev_idx).bdev))
		return true;
	return !test_bit(zone_idx, FDEV(dev_idx).blkz_seq);
}

/* Return the zone index in the given device */
static unsigned int get_zone_idx(struct f2fs_sb_info *sbi, unsigned int secno,
					unsigned int dev_idx)
{
	block_t sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));

	return (sec_start_blkaddr - FDEV(dev_idx).start_blk) >>
						sbi->log_blocks_per_blkz;
}

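/*
 * Example with assumed geometry: 4KB blocks and 256MB zones give
 * sbi->log_blocks_per_blkz = 16 (65536 blocks per zone), so a section
 * starting 131072 blocks past FDEV(dev_idx).start_blk lands in zone
 * index 131072 >> 16 = 2.
 */
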
/*
 * Return the usable segments in a section based on the zone's
 * corresponding zone capacity. Zone is equal to a section.
 */
static inline unsigned int f2fs_usable_zone_segs_in_sec(
		struct f2fs_sb_info *sbi, unsigned int segno)
{
	unsigned int dev_idx, zone_idx, unusable_segs_in_sec;

	dev_idx = f2fs_target_device_index(sbi, START_BLOCK(sbi, segno));
	zone_idx = get_zone_idx(sbi, GET_SEC_FROM_SEG(sbi, segno), dev_idx);

	/* Conventional zone's capacity is always equal to zone size */
	if (is_conv_zone(sbi, zone_idx, dev_idx))
		return sbi->segs_per_sec;

	/*
	 * If the zone_capacity_blocks array is NULL, then zone capacity
	 * is equal to the zone size for all zones
	 */
	if (!FDEV(dev_idx).zone_capacity_blocks)
		return sbi->segs_per_sec;

	/* Get the segment count beyond the zone capacity block */
	unusable_segs_in_sec = (sbi->blocks_per_blkz -
			FDEV(dev_idx).zone_capacity_blocks[zone_idx]) >>
			sbi->log_blocks_per_seg;
	return sbi->segs_per_sec - unusable_segs_in_sec;
}

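/*
 * Worked example with assumed values: 65536 blocks per zone, a reported
 * zone capacity of 49152 blocks, and 512-block segments
 * (log_blocks_per_seg = 9). Then unusable_segs_in_sec =
 * (65536 - 49152) >> 9 = 32, so a 128-segment section offers
 * 128 - 32 = 96 usable segments.
 */
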
/*
 * Return the number of usable blocks in a segment. The number of blocks
 * returned is always equal to the number of blocks in a segment for
 * segments fully contained within a sequential zone capacity or a
 * conventional zone. For segments partially contained in a sequential
 * zone capacity, the number of usable blocks up to the zone capacity
 * is returned. 0 is returned in all other cases.
 */
static inline unsigned int f2fs_usable_zone_blks_in_seg(
			struct f2fs_sb_info *sbi, unsigned int segno)
{
	block_t seg_start, sec_start_blkaddr, sec_cap_blkaddr;
	unsigned int zone_idx, dev_idx, secno;

	secno = GET_SEC_FROM_SEG(sbi, segno);
	seg_start = START_BLOCK(sbi, segno);
	dev_idx = f2fs_target_device_index(sbi, seg_start);
	zone_idx = get_zone_idx(sbi, secno, dev_idx);

	/*
	 * Conventional zone's capacity is always equal to zone size,
	 * so, blocks per segment is unchanged.
	 */
	if (is_conv_zone(sbi, zone_idx, dev_idx))
		return sbi->blocks_per_seg;

	if (!FDEV(dev_idx).zone_capacity_blocks)
		return sbi->blocks_per_seg;

	sec_start_blkaddr = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, secno));
	sec_cap_blkaddr = sec_start_blkaddr +
			FDEV(dev_idx).zone_capacity_blocks[zone_idx];

	/*
	 * If the segment starts before the zone capacity and spans beyond
	 * it, the usable blocks run from the segment start up to the zone
	 * capacity. If the segment starts at or after the zone capacity,
	 * there are no usable blocks.
	 */
	if (seg_start >= sec_cap_blkaddr)
		return 0;
	if (seg_start + sbi->blocks_per_seg > sec_cap_blkaddr)
		return sec_cap_blkaddr - seg_start;

	return sbi->blocks_per_seg;
}

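/*
 * Worked example with assumed values: with 512-block segments, a segment
 * whose start lies 256 blocks before sec_cap_blkaddr is partially usable
 * and returns 256; one starting at or past sec_cap_blkaddr returns 0;
 * one ending at or before the capacity boundary keeps all 512 blocks.
 */
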
#else
int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
{
	return 0;
}

int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
{
	return 0;
}

static inline unsigned int f2fs_usable_zone_blks_in_seg(struct f2fs_sb_info *sbi,
							unsigned int segno)
{
	return 0;
}

static inline unsigned int f2fs_usable_zone_segs_in_sec(struct f2fs_sb_info *sbi,
							unsigned int segno)
{
	return 0;
}
#endif

unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return f2fs_usable_zone_blks_in_seg(sbi, segno);

	return sbi->blocks_per_seg;
}

unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	if (f2fs_sb_has_blkzoned(sbi))
		return f2fs_usable_zone_segs_in_sec(sbi, segno);

	return sbi->segs_per_sec;
}

/*
 * Update min, max modified time for the cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	down_write(&sit_i->sentry_lock);

	sit_i->min_mtime = ULLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi, false);
	sit_i->dirty_max_mtime = 0;
	up_write(&sit_i->sentry_lock);
}

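/*
 * Example: with segs_per_sec = 2 and segment mtimes of 100 and 200, the
 * section contributes div_u64(300, 2) = 150 and min_mtime tracks the
 * smallest such per-section average, while max_mtime is taken from the
 * mount-wide clock via get_mtime().
 */
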
int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
		sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;

	if (!f2fs_lfs_mode(sbi))
		sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
	sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
	sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
	sm_info->min_ssr_sections = reserved_sections(sbi);

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	init_rwsem(&sm_info->curseg_lock);

	if (!f2fs_readonly(sbi->sb)) {
		err = f2fs_create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = create_discard_cmd_control(sbi);
	if (err)
		return err;

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	err = build_sit_entries(sbi);
	if (err)
		return err;

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	err = sanity_check_curseg(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}

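/*
 * Example for the prefree reclaim cap above, assuming the default 5%
 * DEF_RECLAIM_PREFREE_SEGMENTS: a volume with 100000 main segments
 * computes 5000 reclaimable prefree segments, which the
 * DEF_MAX_RECLAIM_PREFREE_SEGMENTS clamp then lowers to its limit.
 */
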
static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kvfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	kvfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	if (__is_large_section(sbi)) {
		mutex_lock(&dirty_i->seglist_lock);
		kvfree(dirty_i->dirty_secmap);
		mutex_unlock(&dirty_i->seglist_lock);
	}

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		kfree(array[i].sum_blk);
		kfree(array[i].journal);
	}
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;

	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kvfree(free_i->free_segmap);
	kvfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!sit_i)
		return;

	if (sit_i->sentries)
		kvfree(sit_i->bitmap);
	kfree(sit_i->tmp_map);

	kvfree(sit_i->sentries);
	kvfree(sit_i->sec_entries);
	kvfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kvfree(sit_i->sit_bitmap);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(sit_i->sit_bitmap_mir);
	kvfree(sit_i->invalid_segmap);
#endif
	kfree(sit_i);
}

void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	f2fs_destroy_flush_cmd_control(sbi, true);
	destroy_discard_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

int __init f2fs_create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
			sizeof(struct discard_cmd));
	if (!discard_cmd_slab)
		goto destroy_discard_entry;

	sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_cmd;

	inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
			sizeof(struct inmem_pages));
	if (!inmem_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_cmd:
	kmem_cache_destroy(discard_cmd_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void f2fs_destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_cmd_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}