/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

static int gc_thread_func(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
	wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
	unsigned int wait_ms;

	wait_ms = gc_th->min_sleep_time;

	set_freezable();
	do {
		wait_event_interruptible_timeout(*wq,
				kthread_should_stop() || freezing(current) ||
				gc_th->gc_wake,
				msecs_to_jiffies(wait_ms));

		/* give it a try one time */
		if (gc_th->gc_wake)
			gc_th->gc_wake = 0;

		if (try_to_freeze())
			continue;
		if (kthread_should_stop())
			break;

		if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
			increase_sleep_time(gc_th, &wait_ms);
			continue;
		}

#ifdef CONFIG_F2FS_FAULT_INJECTION
		if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
			f2fs_show_injection_info(FAULT_CHECKPOINT);
			f2fs_stop_checkpoint(sbi, false);
		}
#endif

		if (!sb_start_write_trylock(sbi->sb))
			continue;

		/*
		 * [GC triggering condition]
		 * 0. GC is not conducted currently.
		 * 1. There are enough dirty segments.
		 * 2. IO subsystem is idle by checking the # of writeback pages.
		 * 3. IO subsystem is idle by checking the # of requests in
		 *    bdev's request list.
		 *
		 * Note) We have to avoid triggering GCs too frequently,
		 * because some segments may be invalidated soon afterwards
		 * by user updates or deletions. So we wait a while to let
		 * dirty segments accumulate.
		 */
		if (!mutex_trylock(&sbi->gc_mutex))
			goto next;

		if (gc_th->gc_urgent) {
			wait_ms = gc_th->urgent_sleep_time;
			goto do_gc;
		}

		if (!is_idle(sbi)) {
			increase_sleep_time(gc_th, &wait_ms);
			mutex_unlock(&sbi->gc_mutex);
			goto next;
		}

		if (has_enough_invalid_blocks(sbi))
			decrease_sleep_time(gc_th, &wait_ms);
		else
			increase_sleep_time(gc_th, &wait_ms);
do_gc:
		stat_inc_bggc_count(sbi);

		/* if return value is not zero, no victim was selected */
		if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true, NULL_SEGNO))
			wait_ms = gc_th->no_gc_sleep_time;

		trace_f2fs_background_gc(sbi->sb, wait_ms,
				prefree_segments(sbi), free_segments(sbi));

		/* balancing f2fs's metadata periodically */
		f2fs_balance_fs_bg(sbi);
next:
		sb_end_write(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}

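/*
 * Illustrative note (added commentary, not part of the original source):
 * the sleep interval above adapts between gc_th->min_sleep_time and
 * gc_th->max_sleep_time depending on how many invalid blocks have
 * accumulated, drops to gc_th->urgent_sleep_time while gc_urgent is set,
 * and backs off to gc_th->no_gc_sleep_time when f2fs_gc() could not
 * select a victim. The DEF_GC_THREAD_*_SLEEP_TIME defaults used below
 * are defined in gc.h.
 */
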
int start_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th;
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	int err = 0;

	gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
	if (!gc_th) {
		err = -ENOMEM;
		goto out;
	}

	gc_th->urgent_sleep_time = DEF_GC_THREAD_URGENT_SLEEP_TIME;
	gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
	gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
	gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

	gc_th->gc_idle = 0;
	gc_th->gc_urgent = 0;
	gc_th->gc_wake = 0;

	sbi->gc_thread = gc_th;
	init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
	sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
			"f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(gc_th->f2fs_gc_task)) {
		err = PTR_ERR(gc_th->f2fs_gc_task);
		kfree(gc_th);
		sbi->gc_thread = NULL;
	}
out:
	return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
	struct f2fs_gc_kthread *gc_th = sbi->gc_thread;

	if (!gc_th)
		return;
	kthread_stop(gc_th->f2fs_gc_task);
	kfree(gc_th);
	sbi->gc_thread = NULL;
}

static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
	int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

	if (gc_th && gc_th->gc_idle) {
		if (gc_th->gc_idle == 1)
			gc_mode = GC_CB;
		else if (gc_th->gc_idle == 2)
			gc_mode = GC_GREEDY;
	}
	return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
			int type, struct victim_sel_policy *p)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (p->alloc_mode == SSR) {
		p->gc_mode = GC_GREEDY;
		p->dirty_segmap = dirty_i->dirty_segmap[type];
		p->max_search = dirty_i->nr_dirty[type];
		p->ofs_unit = 1;
	} else {
		p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
		p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
		p->max_search = dirty_i->nr_dirty[DIRTY];
		p->ofs_unit = sbi->segs_per_sec;
	}

	/* we need to check every dirty segment in the FG_GC case */
	if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
		p->max_search = sbi->max_victim_search;

	/* let's select the beginning hot/small space first in no_heap mode */
	if (test_opt(sbi, NOHEAP) &&
		(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
		p->offset = 0;
	else
		p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
}

static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
				struct victim_sel_policy *p)
{
	/* SSR allocates in a segment unit */
	if (p->alloc_mode == SSR)
		return sbi->blocks_per_seg;
	if (p->gc_mode == GC_GREEDY)
		return 2 * sbi->blocks_per_seg * p->ofs_unit;
	else if (p->gc_mode == GC_CB)
		return UINT_MAX;
	else /* No other gc_mode */
		return 0;
}

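/*
 * Illustrative note (added commentary, not part of the original source):
 * get_max_cost() seeds p->min_cost, i.e. the worst acceptable cost for
 * the chosen policy. GC_GREEDY costs are valid-block counts, so the
 * initial bound of two full segments per allocation unit is effectively
 * unreachable; GC_CB costs are computed as UINT_MAX minus a benefit, so
 * that search starts from UINT_MAX.
 */
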
static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int secno;

	/*
	 * If the gc_type is FG_GC, we can select the victim segments
	 * selected by background GC before.
	 * Those segments are guaranteed to have few valid blocks.
	 */
	for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
		if (sec_usage_check(sbi, secno))
			continue;

		if (no_fggc_candidate(sbi, secno))
			continue;

		clear_bit(secno, dirty_i->victim_secmap);
		return GET_SEG_FROM_SEC(sbi, secno);
	}
	return NULL_SEGNO;
}

static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int secno = GET_SEC_FROM_SEG(sbi, segno);
	unsigned int start = GET_SEG_FROM_SEC(sbi, secno);
	unsigned long long mtime = 0;
	unsigned int vblocks;
	unsigned char age = 0;
	unsigned char u;
	unsigned int i;

	for (i = 0; i < sbi->segs_per_sec; i++)
		mtime += get_seg_entry(sbi, start + i)->mtime;
	vblocks = get_valid_blocks(sbi, segno, true);

	mtime = div_u64(mtime, sbi->segs_per_sec);
	vblocks = div_u64(vblocks, sbi->segs_per_sec);

	u = (vblocks * 100) >> sbi->log_blocks_per_seg;

	/* Handle the case where the system time was changed by the user */
	if (mtime < sit_i->min_mtime)
		sit_i->min_mtime = mtime;
	if (mtime > sit_i->max_mtime)
		sit_i->max_mtime = mtime;
	if (sit_i->max_mtime != sit_i->min_mtime)
		age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
				sit_i->max_mtime - sit_i->min_mtime);

	return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

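/*
 * Worked example (illustrative numbers, not from the original source):
 * with log_blocks_per_seg = 9 (512 blocks per segment) and vblocks = 205,
 * u = (205 * 100) >> 9 = 40, i.e. ~40% utilized. If the section's mtime
 * sits halfway between min_mtime and max_mtime, age = 100 - 50 = 50, so
 * cost = UINT_MAX - ((100 * (100 - 40) * 50) / (100 + 40))
 *      = UINT_MAX - 2142.
 * Since the victim search minimizes cost, colder (older) and less
 * utilized sections win, which is the classic cost-benefit heuristic.
 */
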
static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
			unsigned int segno, struct victim_sel_policy *p)
{
	if (p->alloc_mode == SSR)
		return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

	/* alloc_mode == LFS */
	if (p->gc_mode == GC_GREEDY)
		return get_valid_blocks(sbi, segno, true);
	else
		return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
				unsigned int offset, unsigned int len)
{
	unsigned int end = offset + len, sum = 0;

	while (offset < end) {
		if (test_bit(offset++, addr))
			++sum;
	}
	return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When it is called during GC, it just gets a victim segment
 * and does not remove it from the dirty seglist.
 * When it is called from SSR segment selection, it finds the segment
 * with the minimum number of valid blocks and removes it from the
 * dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
		unsigned int *result, int gc_type, int type, char alloc_mode)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct sit_info *sm = SIT_I(sbi);
	struct victim_sel_policy p;
	unsigned int secno, last_victim;
	unsigned int last_segment = MAIN_SEGS(sbi);
	unsigned int nsearched = 0;

	mutex_lock(&dirty_i->seglist_lock);

	p.alloc_mode = alloc_mode;
	select_policy(sbi, gc_type, type, &p);

	p.min_segno = NULL_SEGNO;
	p.min_cost = get_max_cost(sbi, &p);

	if (*result != NULL_SEGNO) {
		if (IS_DATASEG(get_seg_entry(sbi, *result)->type) &&
			get_valid_blocks(sbi, *result, false) &&
			!sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result)))
			p.min_segno = *result;
		goto out;
	}

	if (p.max_search == 0)
		goto out;

	last_victim = sm->last_victim[p.gc_mode];
	if (p.alloc_mode == LFS && gc_type == FG_GC) {
		p.min_segno = check_bg_victims(sbi);
		if (p.min_segno != NULL_SEGNO)
			goto got_it;
	}

	while (1) {
		unsigned long cost;
		unsigned int segno;

		segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
		if (segno >= last_segment) {
			if (sm->last_victim[p.gc_mode]) {
				last_segment =
					sm->last_victim[p.gc_mode];
				sm->last_victim[p.gc_mode] = 0;
				p.offset = 0;
				continue;
			}
			break;
		}

		p.offset = segno + p.ofs_unit;
		if (p.ofs_unit > 1) {
			p.offset -= segno % p.ofs_unit;
			nsearched += count_bits(p.dirty_segmap,
						p.offset - p.ofs_unit,
						p.ofs_unit);
		} else {
			nsearched++;
		}

		secno = GET_SEC_FROM_SEG(sbi, segno);

		if (sec_usage_check(sbi, secno))
			goto next;
		if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
			goto next;
		if (gc_type == FG_GC && p.alloc_mode == LFS &&
					no_fggc_candidate(sbi, secno))
			goto next;

		cost = get_gc_cost(sbi, segno, &p);

		if (p.min_cost > cost) {
			p.min_segno = segno;
			p.min_cost = cost;
		}
next:
		if (nsearched >= p.max_search) {
			if (!sm->last_victim[p.gc_mode] && segno <= last_victim)
				sm->last_victim[p.gc_mode] = last_victim + 1;
			else
				sm->last_victim[p.gc_mode] = segno + 1;
			sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi);
			break;
		}
	}
	if (p.min_segno != NULL_SEGNO) {
got_it:
		if (p.alloc_mode == LFS) {
			secno = GET_SEC_FROM_SEG(sbi, p.min_segno);
			if (gc_type == FG_GC)
				sbi->cur_victim_sec = secno;
			else
				set_bit(secno, dirty_i->victim_secmap);
		}
		*result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

		trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
				sbi->cur_victim_sec,
				prefree_segments(sbi), free_segments(sbi));
	}
out:
	mutex_unlock(&dirty_i->seglist_lock);

	return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

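/*
 * Illustrative note (added commentary, not part of the original source):
 * the victim scan above is circular. It starts from last_victim[gc_mode],
 * and when find_next_bit() runs off the end of the bitmap, last_segment
 * is shrunk to the saved last_victim and the offset rewinds to 0, so the
 * front part of the bitmap is still covered before the loop gives up.
 */
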
static const struct victim_selection default_v_ops = {
	.get_victim = get_victim_by_default,
};

static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
	struct inode_entry *ie;

	ie = radix_tree_lookup(&gc_list->iroot, ino);
	if (ie)
		return ie->inode;
	return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
	struct inode_entry *new_ie;

	if (inode == find_gc_inode(gc_list, inode->i_ino)) {
		iput(inode);
		return;
	}
	new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
	new_ie->inode = inode;

	f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
	list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
	struct inode_entry *ie, *next_ie;

	list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
		radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
		iput(ie->inode);
		list_del(&ie->list);
		kmem_cache_free(inode_entry_slab, ie);
	}
}

static int check_valid_map(struct f2fs_sb_info *sbi,
				unsigned int segno, int offset)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct seg_entry *sentry;
	int ret;

	down_read(&sit_i->sentry_lock);
	sentry = get_seg_entry(sbi, segno);
	ret = f2fs_test_bit(offset, sentry->cur_valid_map);
	up_read(&sit_i->sentry_lock);
	return ret;
}

/*
 * This function compares the node address found in the summary with that
 * in the NAT. If they match, the node is migrated with cold status;
 * otherwise (an invalid node) it is ignored.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
		struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		nid_t nid = le32_to_cpu(entry->nid);
		struct page *node_page;
		struct node_info ni;

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* phase == 2 */
		node_page = get_node_page(sbi, nid);
		if (IS_ERR(node_page))
			continue;

		/* block may become invalid during get_node_page */
		if (check_valid_map(sbi, segno, off) == 0) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		get_node_info(sbi, nid, &ni);
		if (ni.blk_addr != start_addr + off) {
			f2fs_put_page(node_page, 1);
			continue;
		}

		move_node_page(node_page, gc_type);
		stat_inc_node_blk_count(sbi, 1, gc_type);
	}

	if (++phase < 3)
		goto next_step;
}

/*
 * Calculate the start block index indicated by the given node offset.
 * Be careful: the caller should pass only node offsets that indicate
 * direct node blocks. If a node offset that points to another type of
 * node block, such as an indirect or double indirect node block, is
 * given, it is the caller's mistake.
 */
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
	unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
	unsigned int bidx;

	if (node_ofs == 0)
		return 0;

	if (node_ofs <= 2) {
		bidx = node_ofs - 1;
	} else if (node_ofs <= indirect_blks) {
		int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 2 - dec;
	} else {
		int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
		bidx = node_ofs - 5 - dec;
	}
	return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

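/*
 * Worked example for start_bidx_of_node() above (illustrative, assuming
 * the usual 4KB layout where NIDS_PER_BLOCK = 1018, so indirect_blks
 * = 2 * 1018 + 4 = 2040): node_ofs = 1, the first direct node, gives
 * bidx = 0; node_ofs = 4, the first direct node under the first indirect
 * node (which itself sits at node_ofs = 3), gives dec = 0 and
 * bidx = 4 - 2 - 0 = 2. The returned block index is then
 * bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode), skipping the data
 * addresses stored in the inode block itself.
 */
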
static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
	struct page *node_page;
	nid_t nid;
	unsigned int ofs_in_node;
	block_t source_blkaddr;

	nid = le32_to_cpu(sum->nid);
	ofs_in_node = le16_to_cpu(sum->ofs_in_node);

	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return false;

	get_node_info(sbi, nid, dni);

	if (sum->version != dni->version) {
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: valid data with mismatched node version.",
				__func__);
		set_sbi_flag(sbi, SBI_NEED_FSCK);
	}

	*nofs = ofs_of_node(node_page);
	source_blkaddr = datablock_addr(NULL, node_page, ofs_in_node);
	f2fs_put_page(node_page, 1);

	if (source_blkaddr != blkaddr)
		return false;
	return true;
}

/*
 * Move a data block via META_MAPPING while keeping the data page locked.
 * This can be used to move blocks, aka LBAs, directly on disk.
 */
static void move_data_block(struct inode *inode, block_t bidx,
					unsigned int segno, int off)
{
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(inode),
		.ino = inode->i_ino,
		.type = DATA,
		.temp = COLD,
		.op = REQ_OP_READ,
		.op_flags = 0,
		.encrypted_page = NULL,
		.in_list = false,
	};
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	struct page *page;
	block_t newaddr;
	int err;

	/* do not read out */
	page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
	if (!page)
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
	if (err)
		goto out;

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		ClearPageUptodate(page);
		goto put_out;
	}

	/*
	 * don't cache encrypted data into the meta inode until previous
	 * dirty data has been written back, to avoid racing between GC
	 * and flush.
	 */
	f2fs_wait_on_page_writeback(page, DATA, true);

	get_node_info(fio.sbi, dn.nid, &ni);
	set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

	/* read page */
	fio.page = page;
	fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

	allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
					&sum, CURSEG_COLD_DATA, NULL, false);

	fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
				newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS);
	if (!fio.encrypted_page) {
		err = -ENOMEM;
		goto recover_block;
	}

	err = f2fs_submit_page_bio(&fio);
	if (err)
		goto put_page_out;

	/* write page */
	lock_page(fio.encrypted_page);

	if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
		err = -EIO;
		goto put_page_out;
	}
	if (unlikely(!PageUptodate(fio.encrypted_page))) {
		err = -EIO;
		goto put_page_out;
	}

	set_page_dirty(fio.encrypted_page);
	f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
	if (clear_page_dirty_for_io(fio.encrypted_page))
		dec_page_count(fio.sbi, F2FS_DIRTY_META);

	set_page_writeback(fio.encrypted_page);

	/* allocate block address */
	f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

	fio.op = REQ_OP_WRITE;
	fio.op_flags = REQ_SYNC;
	fio.new_blkaddr = newaddr;
	err = f2fs_submit_page_write(&fio);
	if (err) {
		if (PageWriteback(fio.encrypted_page))
			end_page_writeback(fio.encrypted_page);
		goto put_page_out;
	}

	f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);

	f2fs_update_data_blkaddr(&dn, newaddr);
	set_inode_flag(inode, FI_APPEND_WRITE);
	if (page->index == 0)
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
	f2fs_put_page(fio.encrypted_page, 1);
recover_block:
	if (err)
		__f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
								true, true);
put_out:
	f2fs_put_dnode(&dn);
out:
	f2fs_put_page(page, 1);
}

static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
						unsigned int segno, int off)
{
	struct page *page;

	page = get_lock_data_page(inode, bidx, true);
	if (IS_ERR(page))
		return;

	if (!check_valid_map(F2FS_I_SB(inode), segno, off))
		goto out;

	if (f2fs_is_atomic_file(inode))
		goto out;

	if (gc_type == BG_GC) {
		if (PageWriteback(page))
			goto out;
		set_page_dirty(page);
		set_cold_data(page);
	} else {
		struct f2fs_io_info fio = {
			.sbi = F2FS_I_SB(inode),
			.ino = inode->i_ino,
			.type = DATA,
			.temp = COLD,
			.op = REQ_OP_WRITE,
			.op_flags = REQ_SYNC,
			.old_blkaddr = NULL_ADDR,
			.page = page,
			.encrypted_page = NULL,
			.need_lock = LOCK_REQ,
			.io_type = FS_GC_DATA_IO,
		};
		bool is_dirty = PageDirty(page);
		int err;

retry:
		set_page_dirty(page);
		f2fs_wait_on_page_writeback(page, DATA, true);
		if (clear_page_dirty_for_io(page)) {
			inode_dec_dirty_pages(inode);
			remove_dirty_inode(inode);
		}

		set_cold_data(page);

		err = do_write_data_page(&fio);
		if (err == -ENOMEM && is_dirty) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
out:
	f2fs_put_page(page, 1);
}

/*
 * This function tries to get the parent node of a victim data block and
 * checks the block's validity. If the block is valid, it is copied with
 * cold status and the parent node is modified.
 * If the parent node is not valid or the data block address differs,
 * the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
		struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
	struct super_block *sb = sbi->sb;
	struct f2fs_summary *entry;
	block_t start_addr;
	int off;
	int phase = 0;

	start_addr = START_BLOCK(sbi, segno);

next_step:
	entry = sum;

	for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
		struct page *data_page;
		struct inode *inode;
		struct node_info dni; /* dnode info for the data */
		unsigned int ofs_in_node, nofs;
		block_t start_bidx;
		nid_t nid = le32_to_cpu(entry->nid);

		/* stop BG_GC if there are not enough free sections. */
		if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
			return;

		if (check_valid_map(sbi, segno, off) == 0)
			continue;

		if (phase == 0) {
			ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
							META_NAT, true);
			continue;
		}

		if (phase == 1) {
			ra_node_page(sbi, nid);
			continue;
		}

		/* Get an inode by ino with checking validity */
		if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
			continue;

		if (phase == 2) {
			ra_node_page(sbi, dni.ino);
			continue;
		}

		ofs_in_node = le16_to_cpu(entry->ofs_in_node);

		if (phase == 3) {
			inode = f2fs_iget(sb, dni.ino);
			if (IS_ERR(inode) || is_bad_inode(inode))
				continue;

			/* if encrypted inode, let's go phase 3 */
			if (f2fs_encrypted_file(inode)) {
				add_gc_inode(gc_list, inode);
				continue;
			}

			if (!down_write_trylock(
				&F2FS_I(inode)->dio_rwsem[WRITE])) {
				iput(inode);
				continue;
			}

			start_bidx = start_bidx_of_node(nofs, inode);
			data_page = get_read_data_page(inode,
					start_bidx + ofs_in_node, REQ_RAHEAD,
					true);
			up_write(&F2FS_I(inode)->dio_rwsem[WRITE]);
			if (IS_ERR(data_page)) {
				iput(inode);
				continue;
			}

			f2fs_put_page(data_page, 0);
			add_gc_inode(gc_list, inode);
			continue;
		}

		/* phase 4 */
		inode = find_gc_inode(gc_list, dni.ino);
		if (inode) {
			struct f2fs_inode_info *fi = F2FS_I(inode);
			bool locked = false;

			if (S_ISREG(inode->i_mode)) {
				if (!down_write_trylock(&fi->dio_rwsem[READ]))
					continue;
				if (!down_write_trylock(
						&fi->dio_rwsem[WRITE])) {
					up_write(&fi->dio_rwsem[READ]);
					continue;
				}
				locked = true;

				/* wait for all inflight aio data */
				inode_dio_wait(inode);
			}

			start_bidx = start_bidx_of_node(nofs, inode)
								+ ofs_in_node;
			if (f2fs_encrypted_file(inode))
				move_data_block(inode, start_bidx, segno, off);
			else
				move_data_page(inode, start_bidx, gc_type,
								segno, off);

			if (locked) {
				up_write(&fi->dio_rwsem[WRITE]);
				up_write(&fi->dio_rwsem[READ]);
			}

			stat_inc_data_blk_count(sbi, 1, gc_type);
		}
	}

	if (++phase < 5)
		goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
			int gc_type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	int ret;

	down_write(&sit_i->sentry_lock);
	ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
						NO_CHECK_TYPE, LFS);
	up_write(&sit_i->sentry_lock);
	return ret;
}

static int do_garbage_collect(struct f2fs_sb_info *sbi,
				unsigned int start_segno,
				struct gc_inode_list *gc_list, int gc_type)
{
	struct page *sum_page;
	struct f2fs_summary_block *sum;
	struct blk_plug plug;
	unsigned int segno = start_segno;
	unsigned int end_segno = start_segno + sbi->segs_per_sec;
	int seg_freed = 0;
	unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
						SUM_TYPE_DATA : SUM_TYPE_NODE;

	/* readahead multiple SSA blocks that have contiguous addresses */
	if (sbi->segs_per_sec > 1)
		ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
					sbi->segs_per_sec, META_SSA, true);

	/* reference all summary pages */
	while (segno < end_segno) {
		sum_page = get_sum_page(sbi, segno++);
		unlock_page(sum_page);
	}

	blk_start_plug(&plug);

	for (segno = start_segno; segno < end_segno; segno++) {

		/* find segment summary of victim */
		sum_page = find_get_page(META_MAPPING(sbi),
					GET_SUM_BLOCK(sbi, segno));
		f2fs_put_page(sum_page, 0);

		if (get_valid_blocks(sbi, segno, false) == 0 ||
				!PageUptodate(sum_page) ||
				unlikely(f2fs_cp_error(sbi)))
			goto next;

		sum = page_address(sum_page);
		f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

		/*
		 * this is to avoid deadlock:
		 * - lock_page(sum_page)         - f2fs_replace_block
		 *  - check_valid_map()            - down_write(sentry_lock)
		 *   - down_read(sentry_lock)     - change_curseg()
		 *                                  - lock_page(sum_page)
		 */
		if (type == SUM_TYPE_NODE)
			gc_node_segment(sbi, sum->entries, segno, gc_type);
		else
			gc_data_segment(sbi, sum->entries, gc_list, segno,
								gc_type);

		stat_inc_seg_count(sbi, type, gc_type);

		if (gc_type == FG_GC &&
				get_valid_blocks(sbi, segno, false) == 0)
			seg_freed++;
next:
		f2fs_put_page(sum_page, 0);
	}

	if (gc_type == FG_GC)
		f2fs_submit_merged_write(sbi,
				(type == SUM_TYPE_NODE) ? NODE : DATA);

	blk_finish_plug(&plug);

	stat_inc_call_count(sbi->stat_info);

	return seg_freed;
}

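/*
 * Illustrative note (added commentary, not part of the original source):
 * one call to do_garbage_collect() processes one section, i.e.
 * segs_per_sec consecutive segments, and returns how many of those
 * segments ended up with zero valid blocks, which f2fs_gc() below uses
 * to decide whether a whole section was freed.
 */
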
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync,
			bool background, unsigned int segno)
{
	int gc_type = sync ? FG_GC : BG_GC;
	int sec_freed = 0, seg_freed = 0, total_freed = 0;
	int ret = 0;
	struct cp_control cpc;
	unsigned int init_segno = segno;
	struct gc_inode_list gc_list = {
		.ilist = LIST_HEAD_INIT(gc_list.ilist),
		.iroot = RADIX_TREE_INIT(GFP_NOFS),
	};

	trace_f2fs_gc_begin(sbi->sb, sync, background,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	cpc.reason = __get_cp_reason(sbi);
gc_more:
	if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
		ret = -EINVAL;
		goto stop;
	}
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto stop;
	}

	if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0)) {
		/*
		 * For example, if there are many prefree_segments below the
		 * given threshold, we can make them free by checkpoint.
		 * Then, we secure free segments which don't need FG_GC
		 * any more.
		 */
		if (prefree_segments(sbi)) {
			ret = write_checkpoint(sbi, &cpc);
			if (ret)
				goto stop;
		}
		if (has_not_enough_free_secs(sbi, 0, 0))
			gc_type = FG_GC;
	}

	/* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
	if (gc_type == BG_GC && !background) {
		ret = -EINVAL;
		goto stop;
	}
	if (!__get_victim(sbi, &segno, gc_type)) {
		ret = -ENODATA;
		goto stop;
	}

	seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type);
	if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec)
		sec_freed++;
	total_freed += seg_freed;

	if (gc_type == FG_GC)
		sbi->cur_victim_sec = NULL_SEGNO;

	if (!sync) {
		if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
			segno = NULL_SEGNO;
			goto gc_more;
		}

		if (gc_type == FG_GC)
			ret = write_checkpoint(sbi, &cpc);
	}
stop:
	SIT_I(sbi)->last_victim[ALLOC_NEXT] = 0;
	SIT_I(sbi)->last_victim[FLUSH_DEVICE] = init_segno;

	trace_f2fs_gc_end(sbi->sb, ret, total_freed, sec_freed,
				get_pages(sbi, F2FS_DIRTY_NODES),
				get_pages(sbi, F2FS_DIRTY_DENTS),
				get_pages(sbi, F2FS_DIRTY_IMETA),
				free_sections(sbi),
				free_segments(sbi),
				reserved_segments(sbi),
				prefree_segments(sbi));

	mutex_unlock(&sbi->gc_mutex);

	put_gc_inode(&gc_list);

	if (sync)
		ret = sec_freed ? 0 : -EAGAIN;
	return ret;
}

void build_gc_manager(struct f2fs_sb_info *sbi)
{
	u64 main_count, resv_count, ovp_count;

	DIRTY_I(sbi)->v_ops = &default_v_ops;

	/* threshold of # of valid blocks in a section for victims of FG_GC */
	main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
	resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
	ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;

	sbi->fggc_threshold = div64_u64((main_count - ovp_count) *
				BLKS_PER_SEC(sbi), (main_count - resv_count));

	/* give warm/cold data area from slower device */
	if (sbi->s_ndevs && sbi->segs_per_sec == 1)
		SIT_I(sbi)->last_victim[ALLOC_NEXT] =
				GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
}
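
/*
 * Illustrative note (added commentary, not part of the original source):
 * fggc_threshold works out to roughly
 *	(main_blocks - ovp_blocks) / (main_blocks - resv_blocks)
 * scaled by BLKS_PER_SEC(sbi), i.e. an estimate of how many valid blocks
 * an average section can hold once overprovisioning is accounted for.
 * Sections with at least this many valid blocks are rejected by
 * no_fggc_candidate(), since reclaiming them would cost more I/O than
 * the space they return.
 */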