/*
 * fs/f2fs/gc.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"
#include <trace/events/f2fs.h>

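/*
 * Background GC kernel thread (one per mounted f2fs instance). It sleeps
 * for an adaptive interval, then tries to pick and clean a victim section
 * when the filesystem looks idle. The sleep time shrinks while there is
 * plenty of invalid (garbage) data to reclaim and grows when there is not.
 */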
static int gc_thread_func(void *data)
{
        struct f2fs_sb_info *sbi = data;
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
        long wait_ms;

        wait_ms = gc_th->min_sleep_time;

        do {
                if (try_to_freeze())
                        continue;
                else
                        wait_event_interruptible_timeout(*wq,
                                                kthread_should_stop(),
                                                msecs_to_jiffies(wait_ms));
                if (kthread_should_stop())
                        break;

                if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
                        increase_sleep_time(gc_th, &wait_ms);
                        continue;
                }

#ifdef CONFIG_F2FS_FAULT_INJECTION
                if (time_to_inject(sbi, FAULT_CHECKPOINT))
                        f2fs_stop_checkpoint(sbi, false);
#endif

                /*
                 * [GC triggering condition]
                 * 0. GC is not conducted currently.
                 * 1. There are enough dirty segments.
                 * 2. The IO subsystem is idle, judged by the number of
                 *    writeback pages.
                 * 3. The IO subsystem is idle, judged by the number of
                 *    requests in the bdev's request list.
                 *
                 * Note: we must avoid triggering GC too frequently, because
                 * some segments may be invalidated soon afterwards by user
                 * updates or deletions. So it is worth waiting a while to
                 * collect more dirty segments.
                 */
                if (!mutex_trylock(&sbi->gc_mutex))
                        continue;

                if (!is_idle(sbi)) {
                        increase_sleep_time(gc_th, &wait_ms);
                        mutex_unlock(&sbi->gc_mutex);
                        continue;
                }

                if (has_enough_invalid_blocks(sbi))
                        decrease_sleep_time(gc_th, &wait_ms);
                else
                        increase_sleep_time(gc_th, &wait_ms);

                stat_inc_bggc_count(sbi);

                /* if return value is not zero, no victim was selected */
                if (f2fs_gc(sbi, test_opt(sbi, FORCE_FG_GC), true))
                        wait_ms = gc_th->no_gc_sleep_time;

                trace_f2fs_background_gc(sbi->sb, wait_ms,
                                prefree_segments(sbi), free_segments(sbi));

                /* balancing f2fs's metadata periodically */
                f2fs_balance_fs_bg(sbi);

        } while (!kthread_should_stop());
        return 0;
}

int start_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th;
        dev_t dev = sbi->sb->s_bdev->bd_dev;
        int err = 0;

        gc_th = f2fs_kmalloc(sbi, sizeof(struct f2fs_gc_kthread), GFP_KERNEL);
        if (!gc_th) {
                err = -ENOMEM;
                goto out;
        }

        gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
        gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
        gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;

        gc_th->gc_idle = 0;

        sbi->gc_thread = gc_th;
        init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
        sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
                        "f2fs_gc-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(gc_th->f2fs_gc_task)) {
                err = PTR_ERR(gc_th->f2fs_gc_task);
                kfree(gc_th);
                sbi->gc_thread = NULL;
        }
out:
        return err;
}

void stop_gc_thread(struct f2fs_sb_info *sbi)
{
        struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
        if (!gc_th)
                return;
        kthread_stop(gc_th->f2fs_gc_task);
        kfree(gc_th);
        sbi->gc_thread = NULL;
}

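/*
 * Pick the victim-selection policy. gc_idle is a sysfs tunable: 0 keeps
 * the default (cost-benefit for background GC, greedy otherwise), 1 forces
 * cost-benefit, and 2 forces greedy selection.
 */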
static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
        int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;

        if (gc_th && gc_th->gc_idle) {
                if (gc_th->gc_idle == 1)
                        gc_mode = GC_CB;
                else if (gc_th->gc_idle == 2)
                        gc_mode = GC_GREEDY;
        }
        return gc_mode;
}

static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
                        int type, struct victim_sel_policy *p)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

        if (p->alloc_mode == SSR) {
                p->gc_mode = GC_GREEDY;
                p->dirty_segmap = dirty_i->dirty_segmap[type];
                p->max_search = dirty_i->nr_dirty[type];
                p->ofs_unit = 1;
        } else {
                p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
                p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
                p->max_search = dirty_i->nr_dirty[DIRTY];
                p->ofs_unit = sbi->segs_per_sec;
        }

        /* we need to check every dirty segment in the FG_GC case */
        if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
                p->max_search = sbi->max_victim_search;

        p->offset = sbi->last_victim[p->gc_mode];
}

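/*
 * Upper bound on a victim's cost; used to initialize p->min_cost before
 * the search. Greedy cost is a valid-block count, so it is bounded by the
 * number of blocks in the search unit; cost-benefit values are scaled down
 * from UINT_MAX, so no tighter bound applies there.
 */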
static unsigned int get_max_cost(struct f2fs_sb_info *sbi,
                        struct victim_sel_policy *p)
{
        /* SSR allocates in a segment unit */
        if (p->alloc_mode == SSR)
                return sbi->blocks_per_seg;
        if (p->gc_mode == GC_GREEDY)
                return sbi->blocks_per_seg * p->ofs_unit;
        else if (p->gc_mode == GC_CB)
                return UINT_MAX;
        else /* No other gc_mode */
                return 0;
}

static unsigned int check_bg_victims(struct f2fs_sb_info *sbi)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        unsigned int secno;

        /*
         * If the gc_type is FG_GC, we can reuse victim sections that were
         * already selected by background GC. Those sections are guaranteed
         * to contain only a few valid blocks.
         */
        for_each_set_bit(secno, dirty_i->victim_secmap, MAIN_SECS(sbi)) {
                if (sec_usage_check(sbi, secno))
                        continue;

                if (no_fggc_candidate(sbi, secno))
                        continue;

                clear_bit(secno, dirty_i->victim_secmap);
                return secno * sbi->segs_per_sec;
        }
        return NULL_SEGNO;
}

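/*
 * Cost-benefit cost of a section, after the classic LFS formula: benefit
 * is (free space gained) * (age) and cost is (1 + utilization), i.e.
 * roughly (1 - u) * age / (1 + u) with u and age normalized to 0..100.
 * The result is subtracted from UINT_MAX so that a *lower* return value
 * means a *better* victim, matching the greedy cost ordering.
 */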
static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno)
{
        struct sit_info *sit_i = SIT_I(sbi);
        unsigned int secno = GET_SECNO(sbi, segno);
        unsigned int start = secno * sbi->segs_per_sec;
        unsigned long long mtime = 0;
        unsigned int vblocks;
        unsigned char age = 0;
        unsigned char u;
        unsigned int i;

        for (i = 0; i < sbi->segs_per_sec; i++)
                mtime += get_seg_entry(sbi, start + i)->mtime;
        vblocks = get_valid_blocks(sbi, segno, sbi->segs_per_sec);

        mtime = div_u64(mtime, sbi->segs_per_sec);
        vblocks = div_u64(vblocks, sbi->segs_per_sec);

        u = (vblocks * 100) >> sbi->log_blocks_per_seg;

        /* handle the case where the user changed the system time */
        if (mtime < sit_i->min_mtime)
                sit_i->min_mtime = mtime;
        if (mtime > sit_i->max_mtime)
                sit_i->max_mtime = mtime;
        if (sit_i->max_mtime != sit_i->min_mtime)
                age = 100 - div64_u64(100 * (mtime - sit_i->min_mtime),
                                sit_i->max_mtime - sit_i->min_mtime);

        return UINT_MAX - ((100 * (100 - u) * age) / (100 + u));
}

static inline unsigned int get_gc_cost(struct f2fs_sb_info *sbi,
                        unsigned int segno, struct victim_sel_policy *p)
{
        if (p->alloc_mode == SSR)
                return get_seg_entry(sbi, segno)->ckpt_valid_blocks;

        /* alloc_mode == LFS */
        if (p->gc_mode == GC_GREEDY)
                return get_valid_blocks(sbi, segno, sbi->segs_per_sec);
        else
                return get_cb_cost(sbi, segno);
}

static unsigned int count_bits(const unsigned long *addr,
                        unsigned int offset, unsigned int len)
{
        unsigned int end = offset + len, sum = 0;

        while (offset < end) {
                if (test_bit(offset++, addr))
                        ++sum;
        }
        return sum;
}

/*
 * This function is called from two paths.
 * One is garbage collection and the other is SSR segment selection.
 * When called for GC, it just picks a victim segment and does not remove
 * it from the dirty seglist.
 * When called for SSR segment selection, it finds the segment with the
 * minimum number of valid blocks and removes it from the dirty seglist.
 */
static int get_victim_by_default(struct f2fs_sb_info *sbi,
                unsigned int *result, int gc_type, int type, char alloc_mode)
{
        struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
        struct victim_sel_policy p;
        unsigned int secno, last_victim;
        unsigned int last_segment = MAIN_SEGS(sbi);
        unsigned int nsearched = 0;

        mutex_lock(&dirty_i->seglist_lock);

        p.alloc_mode = alloc_mode;
        select_policy(sbi, gc_type, type, &p);

        p.min_segno = NULL_SEGNO;
        p.min_cost = get_max_cost(sbi, &p);

        if (p.max_search == 0)
                goto out;

        last_victim = sbi->last_victim[p.gc_mode];
        if (p.alloc_mode == LFS && gc_type == FG_GC) {
                p.min_segno = check_bg_victims(sbi);
                if (p.min_segno != NULL_SEGNO)
                        goto got_it;
        }

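        /*
         * Scan the dirty segmap circularly, starting from the segment
         * after the previous victim: when the scan hits the end of the
         * main area it wraps to segment 0 and continues up to where the
         * last victim was, so successive calls cover the whole dirty set.
         */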
        while (1) {
                unsigned long cost;
                unsigned int segno;

                segno = find_next_bit(p.dirty_segmap, last_segment, p.offset);
                if (segno >= last_segment) {
                        if (sbi->last_victim[p.gc_mode]) {
                                last_segment = sbi->last_victim[p.gc_mode];
                                sbi->last_victim[p.gc_mode] = 0;
                                p.offset = 0;
                                continue;
                        }
                        break;
                }

                p.offset = segno + p.ofs_unit;
                if (p.ofs_unit > 1) {
                        p.offset -= segno % p.ofs_unit;
                        nsearched += count_bits(p.dirty_segmap,
                                                p.offset - p.ofs_unit,
                                                p.ofs_unit);
                } else {
                        nsearched++;
                }

                secno = GET_SECNO(sbi, segno);

                if (sec_usage_check(sbi, secno))
                        goto next;
                if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
                        goto next;
                if (gc_type == FG_GC && p.alloc_mode == LFS &&
                                        no_fggc_candidate(sbi, secno))
                        goto next;

                cost = get_gc_cost(sbi, segno, &p);

                if (p.min_cost > cost) {
                        p.min_segno = segno;
                        p.min_cost = cost;
                }
next:
                if (nsearched >= p.max_search) {
                        if (!sbi->last_victim[p.gc_mode] && segno <= last_victim)
                                sbi->last_victim[p.gc_mode] = last_victim + 1;
                        else
                                sbi->last_victim[p.gc_mode] = segno + 1;
                        break;
                }
        }
        if (p.min_segno != NULL_SEGNO) {
got_it:
                if (p.alloc_mode == LFS) {
                        secno = GET_SECNO(sbi, p.min_segno);
                        if (gc_type == FG_GC)
                                sbi->cur_victim_sec = secno;
                        else
                                set_bit(secno, dirty_i->victim_secmap);
                }
                *result = (p.min_segno / p.ofs_unit) * p.ofs_unit;

                trace_f2fs_get_victim(sbi->sb, type, gc_type, &p,
                                sbi->cur_victim_sec,
                                prefree_segments(sbi), free_segments(sbi));
        }
out:
        mutex_unlock(&dirty_i->seglist_lock);

        return (p.min_segno == NULL_SEGNO) ? 0 : 1;
}

static const struct victim_selection default_v_ops = {
        .get_victim = get_victim_by_default,
};

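/*
 * gc_list caches inode references collected while scanning a data segment,
 * keyed by inode number in a radix tree, so later GC phases can reuse an
 * already-opened inode instead of calling f2fs_iget() again. References
 * are dropped in put_gc_inode() once the segment has been processed.
 */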
static struct inode *find_gc_inode(struct gc_inode_list *gc_list, nid_t ino)
{
        struct inode_entry *ie;

        ie = radix_tree_lookup(&gc_list->iroot, ino);
        if (ie)
                return ie->inode;
        return NULL;
}

static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
{
        struct inode_entry *new_ie;

        if (inode == find_gc_inode(gc_list, inode->i_ino)) {
                iput(inode);
                return;
        }
        new_ie = f2fs_kmem_cache_alloc(inode_entry_slab, GFP_NOFS);
        new_ie->inode = inode;

        f2fs_radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie);
        list_add_tail(&new_ie->list, &gc_list->ilist);
}

static void put_gc_inode(struct gc_inode_list *gc_list)
{
        struct inode_entry *ie, *next_ie;
        list_for_each_entry_safe(ie, next_ie, &gc_list->ilist, list) {
                radix_tree_delete(&gc_list->iroot, ie->inode->i_ino);
                iput(ie->inode);
                list_del(&ie->list);
                kmem_cache_free(inode_entry_slab, ie);
        }
}

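/*
 * Check whether the block at (segno, offset) is still marked valid in the
 * current SIT bitmap; blocks can be invalidated concurrently, so callers
 * re-check this around every blocking operation.
 */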
static int check_valid_map(struct f2fs_sb_info *sbi,
                                unsigned int segno, int offset)
{
        struct sit_info *sit_i = SIT_I(sbi);
        struct seg_entry *sentry;
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        sentry = get_seg_entry(sbi, segno);
        ret = f2fs_test_bit(offset, sentry->cur_valid_map);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

/*
 * This function compares the node address found in the summary block with
 * the one recorded in the NAT. If they match, the node is still live and
 * is migrated with cold status; otherwise the stale node is ignored. The
 * segment is walked in three phases: 0 reads ahead NAT entries, 1 reads
 * ahead the node pages, and 2 actually moves the valid nodes.
 */
static void gc_node_segment(struct f2fs_sb_info *sbi,
                struct f2fs_summary *sum, unsigned int segno, int gc_type)
{
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                nid_t nid = le32_to_cpu(entry->nid);
                struct page *node_page;
                struct node_info ni;

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        ra_node_page(sbi, nid);
                        continue;
                }

                /* phase == 2 */
                node_page = get_node_page(sbi, nid);
                if (IS_ERR(node_page))
                        continue;

                /* block may become invalid during get_node_page */
                if (check_valid_map(sbi, segno, off) == 0) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                get_node_info(sbi, nid, &ni);
                if (ni.blk_addr != start_addr + off) {
                        f2fs_put_page(node_page, 1);
                        continue;
                }

                move_node_page(node_page, gc_type);
                stat_inc_node_blk_count(sbi, 1, gc_type);
        }

        if (++phase < 3)
                goto next_step;
}

/*
 * Calculate the start block index corresponding to the given node offset.
 * Be careful: the caller must pass a node offset of a direct node block
 * only. Passing an offset that points to any other node block type, such
 * as an indirect or double indirect node block, is a caller bug.
 */
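/*
 * Layout reminder (see f2fs node addressing): node offset 0 is the inode
 * block itself, offsets 1 and 2 are the two direct node blocks, and later
 * offsets roughly interleave indirect/double-indirect blocks with further
 * direct node blocks; the "dec" terms below subtract those non-direct
 * offsets from the count.
 */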
block_t start_bidx_of_node(unsigned int node_ofs, struct inode *inode)
{
        unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
        unsigned int bidx;

        if (node_ofs == 0)
                return 0;

        if (node_ofs <= 2) {
                bidx = node_ofs - 1;
        } else if (node_ofs <= indirect_blks) {
                int dec = (node_ofs - 4) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 2 - dec;
        } else {
                int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
                bidx = node_ofs - 5 - dec;
        }
        return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(inode);
}

static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct node_info *dni, block_t blkaddr, unsigned int *nofs)
{
        struct page *node_page;
        nid_t nid;
        unsigned int ofs_in_node;
        block_t source_blkaddr;

        nid = le32_to_cpu(sum->nid);
        ofs_in_node = le16_to_cpu(sum->ofs_in_node);

        node_page = get_node_page(sbi, nid);
        if (IS_ERR(node_page))
                return false;

        get_node_info(sbi, nid, dni);

        if (sum->version != dni->version) {
                f2fs_put_page(node_page, 1);
                return false;
        }

        *nofs = ofs_of_node(node_page);
        source_blkaddr = datablock_addr(node_page, ofs_in_node);
        f2fs_put_page(node_page, 1);

        if (source_blkaddr != blkaddr)
                return false;
        return true;
}

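/*
 * Move one block of an encrypted regular file. GC cannot go through the
 * normal data path here, since that would decrypt and re-encrypt the page;
 * instead the raw ciphertext is read into the meta inode's address space
 * and written back verbatim to the newly allocated block address.
 */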
static void move_encrypted_block(struct inode *inode, block_t bidx,
                                        unsigned int segno, int off)
{
        struct f2fs_io_info fio = {
                .sbi = F2FS_I_SB(inode),
                .type = DATA,
                .op = REQ_OP_READ,
                .op_flags = 0,
                .encrypted_page = NULL,
        };
        struct dnode_of_data dn;
        struct f2fs_summary sum;
        struct node_info ni;
        struct page *page;
        block_t newaddr;
        int err;

        /* do not read out */
        page = f2fs_grab_cache_page(inode->i_mapping, bidx, false);
        if (!page)
                return;

        if (!check_valid_map(F2FS_I_SB(inode), segno, off))
                goto out;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, bidx, LOOKUP_NODE);
        if (err)
                goto out;

        if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
                ClearPageUptodate(page);
                goto put_out;
        }

        /*
         * don't cache encrypted data into the meta inode until the previous
         * dirty data has been written back, to avoid racing between GC and
         * flush.
         */
        f2fs_wait_on_page_writeback(page, DATA, true);

        get_node_info(fio.sbi, dn.nid, &ni);
        set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

        /* read page */
        fio.page = page;
        fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;

        allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr,
                                                        &sum, CURSEG_COLD_DATA);

        fio.encrypted_page = pagecache_get_page(META_MAPPING(fio.sbi), newaddr,
                                        FGP_LOCK | FGP_CREAT, GFP_NOFS);
        if (!fio.encrypted_page) {
                err = -ENOMEM;
                goto recover_block;
        }

        err = f2fs_submit_page_bio(&fio);
        if (err)
                goto put_page_out;

        /* write page */
        lock_page(fio.encrypted_page);

        if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
                err = -EIO;
                goto put_page_out;
        }
        if (unlikely(!PageUptodate(fio.encrypted_page))) {
                err = -EIO;
                goto put_page_out;
        }

        set_page_dirty(fio.encrypted_page);
        f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
        if (clear_page_dirty_for_io(fio.encrypted_page))
                dec_page_count(fio.sbi, F2FS_DIRTY_META);

        set_page_writeback(fio.encrypted_page);

        /*
         * the new block address was reserved above; stabilize the dnode
         * page before its block address is updated
         */
        f2fs_wait_on_page_writeback(dn.node_page, NODE, true);

        fio.op = REQ_OP_WRITE;
        fio.op_flags = REQ_SYNC;
        fio.new_blkaddr = newaddr;
        f2fs_submit_page_mbio(&fio);

        f2fs_update_data_blkaddr(&dn, newaddr);
        set_inode_flag(inode, FI_APPEND_WRITE);
        if (page->index == 0)
                set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
        f2fs_put_page(fio.encrypted_page, 1);
recover_block:
        if (err)
                __f2fs_replace_block(fio.sbi, &sum, newaddr, fio.old_blkaddr,
                                                                true, true);
put_out:
        f2fs_put_dnode(&dn);
out:
        f2fs_put_page(page, 1);
}

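/*
 * Move one ordinary (unencrypted) data block. For BG_GC it is enough to
 * mark the page dirty and cold and let regular writeback migrate it; for
 * FG_GC the page is written out synchronously so the section can be freed
 * right away.
 */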
static void move_data_page(struct inode *inode, block_t bidx, int gc_type,
                                        unsigned int segno, int off)
{
        struct page *page;

        page = get_lock_data_page(inode, bidx, true);
        if (IS_ERR(page))
                return;

        if (!check_valid_map(F2FS_I_SB(inode), segno, off))
                goto out;

        if (gc_type == BG_GC) {
                if (PageWriteback(page))
                        goto out;
                set_page_dirty(page);
                set_cold_data(page);
        } else {
                struct f2fs_io_info fio = {
                        .sbi = F2FS_I_SB(inode),
                        .type = DATA,
                        .op = REQ_OP_WRITE,
                        .op_flags = REQ_SYNC,
                        .page = page,
                        .encrypted_page = NULL,
                };
                bool is_dirty = PageDirty(page);
                int err;

retry:
                set_page_dirty(page);
                f2fs_wait_on_page_writeback(page, DATA, true);
                if (clear_page_dirty_for_io(page)) {
                        inode_dec_dirty_pages(inode);
                        remove_dirty_inode(inode);
                }

                set_cold_data(page);

                err = do_write_data_page(&fio);
                if (err == -ENOMEM && is_dirty) {
                        congestion_wait(BLK_RW_ASYNC, HZ/50);
                        goto retry;
                }
        }
out:
        f2fs_put_page(page, 1);
}

/*
 * This function finds the parent node of a victim data block and checks
 * whether the block is still valid. If it is, the block is copied with
 * cold status and the parent node is updated accordingly.
 * If the parent node is no longer valid, or the recorded data block
 * address differs, the victim data block is ignored.
 */
static void gc_data_segment(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
                struct gc_inode_list *gc_list, unsigned int segno, int gc_type)
{
        struct super_block *sb = sbi->sb;
        struct f2fs_summary *entry;
        block_t start_addr;
        int off;
        int phase = 0;

        start_addr = START_BLOCK(sbi, segno);

next_step:
        entry = sum;

        for (off = 0; off < sbi->blocks_per_seg; off++, entry++) {
                struct page *data_page;
                struct inode *inode;
                struct node_info dni; /* dnode info for the data */
                unsigned int ofs_in_node, nofs;
                block_t start_bidx;
                nid_t nid = le32_to_cpu(entry->nid);

                /* stop BG_GC if there are not enough free sections */
                if (gc_type == BG_GC && has_not_enough_free_secs(sbi, 0, 0))
                        return;

                if (check_valid_map(sbi, segno, off) == 0)
                        continue;

                if (phase == 0) {
                        ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), 1,
                                                        META_NAT, true);
                        continue;
                }

                if (phase == 1) {
                        ra_node_page(sbi, nid);
                        continue;
                }

                /* validate the block via its node and get the owning inode */
                if (!is_alive(sbi, entry, &dni, start_addr + off, &nofs))
                        continue;

                if (phase == 2) {
                        ra_node_page(sbi, dni.ino);
                        continue;
                }

                ofs_in_node = le16_to_cpu(entry->ofs_in_node);

                if (phase == 3) {
                        inode = f2fs_iget(sb, dni.ino);
                        if (IS_ERR(inode) || is_bad_inode(inode))
                                continue;

                        /* if the inode is encrypted, defer the move to phase 4 */
                        if (f2fs_encrypted_inode(inode) &&
                                        S_ISREG(inode->i_mode)) {
                                add_gc_inode(gc_list, inode);
                                continue;
                        }

                        start_bidx = start_bidx_of_node(nofs, inode);
                        data_page = get_read_data_page(inode,
                                        start_bidx + ofs_in_node, REQ_RAHEAD,
                                        true);
                        if (IS_ERR(data_page)) {
                                iput(inode);
                                continue;
                        }

                        f2fs_put_page(data_page, 0);
                        add_gc_inode(gc_list, inode);
                        continue;
                }

                /* phase 4 */
                inode = find_gc_inode(gc_list, dni.ino);
                if (inode) {
                        struct f2fs_inode_info *fi = F2FS_I(inode);
                        bool locked = false;

                        if (S_ISREG(inode->i_mode)) {
                                if (!down_write_trylock(&fi->dio_rwsem[READ]))
                                        continue;
                                if (!down_write_trylock(
                                                &fi->dio_rwsem[WRITE])) {
                                        up_write(&fi->dio_rwsem[READ]);
                                        continue;
                                }
                                locked = true;
                        }

                        start_bidx = start_bidx_of_node(nofs, inode)
                                                                + ofs_in_node;
                        if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
                                move_encrypted_block(inode, start_bidx, segno, off);
                        else
                                move_data_page(inode, start_bidx, gc_type, segno, off);

                        if (locked) {
                                up_write(&fi->dio_rwsem[WRITE]);
                                up_write(&fi->dio_rwsem[READ]);
                        }

                        stat_inc_data_blk_count(sbi, 1, gc_type);
                }
        }

        if (++phase < 5)
                goto next_step;
}

static int __get_victim(struct f2fs_sb_info *sbi, unsigned int *victim,
                        int gc_type)
{
        struct sit_info *sit_i = SIT_I(sbi);
        int ret;

        mutex_lock(&sit_i->sentry_lock);
        ret = DIRTY_I(sbi)->v_ops->get_victim(sbi, victim, gc_type,
                                        NO_CHECK_TYPE, LFS);
        mutex_unlock(&sit_i->sentry_lock);
        return ret;
}

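/*
 * Garbage-collect one section (segs_per_sec segments) starting at
 * start_segno. Returns 1 if, under FG_GC, the whole section ended up with
 * zero valid blocks (i.e. one section was freed), and 0 otherwise.
 */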
static int do_garbage_collect(struct f2fs_sb_info *sbi,
                                unsigned int start_segno,
                                struct gc_inode_list *gc_list, int gc_type)
{
        struct page *sum_page;
        struct f2fs_summary_block *sum;
        struct blk_plug plug;
        unsigned int segno = start_segno;
        unsigned int end_segno = start_segno + sbi->segs_per_sec;
        int sec_freed = 0;
        unsigned char type = IS_DATASEG(get_seg_entry(sbi, segno)->type) ?
                                                SUM_TYPE_DATA : SUM_TYPE_NODE;

        /* read ahead multiple SSA blocks, since they have contiguous addresses */
        if (sbi->segs_per_sec > 1)
                ra_meta_pages(sbi, GET_SUM_BLOCK(sbi, segno),
                                        sbi->segs_per_sec, META_SSA, true);

        /* reference all summary pages */
        while (segno < end_segno) {
                sum_page = get_sum_page(sbi, segno++);
                unlock_page(sum_page);
        }

        blk_start_plug(&plug);

        for (segno = start_segno; segno < end_segno; segno++) {

                /* find segment summary of victim */
                sum_page = find_get_page(META_MAPPING(sbi),
                                        GET_SUM_BLOCK(sbi, segno));
                f2fs_put_page(sum_page, 0);

                if (get_valid_blocks(sbi, segno, 1) == 0 ||
                                !PageUptodate(sum_page) ||
                                unlikely(f2fs_cp_error(sbi)))
                        goto next;

                sum = page_address(sum_page);
                f2fs_bug_on(sbi, type != GET_SUM_TYPE((&sum->footer)));

                /*
                 * this is to avoid deadlock:
                 * - lock_page(sum_page)         - f2fs_replace_block
                 *  - check_valid_map()            - mutex_lock(sentry_lock)
                 *   - mutex_lock(sentry_lock)     - change_curseg()
                 *                                  - lock_page(sum_page)
                 */

                if (type == SUM_TYPE_NODE)
                        gc_node_segment(sbi, sum->entries, segno, gc_type);
                else
                        gc_data_segment(sbi, sum->entries, gc_list, segno,
                                                                gc_type);

                stat_inc_seg_count(sbi, type, gc_type);
next:
                f2fs_put_page(sum_page, 0);
        }

        if (gc_type == FG_GC)
                f2fs_submit_merged_bio(sbi,
                                (type == SUM_TYPE_NODE) ? NODE : DATA, WRITE);

        blk_finish_plug(&plug);

        if (gc_type == FG_GC &&
                get_valid_blocks(sbi, start_segno, sbi->segs_per_sec) == 0)
                sec_freed = 1;

        stat_inc_call_count(sbi->stat_info);

        return sec_freed;
}

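/*
 * Entry point for both background and foreground GC. With sync == true the
 * caller needs space immediately: GC runs in FG_GC mode and the return
 * value is 0 only if at least one section was freed (-EAGAIN otherwise).
 * With sync == false, FG_GC keeps looping until enough free sections are
 * available. Called with sbi->gc_mutex held; it is released before return.
 */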
int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background)
{
        unsigned int segno;
        int gc_type = sync ? FG_GC : BG_GC;
        int sec_freed = 0;
        int ret = -EINVAL;
        struct cp_control cpc;
        struct gc_inode_list gc_list = {
                .ilist = LIST_HEAD_INIT(gc_list.ilist),
                .iroot = RADIX_TREE_INIT(GFP_NOFS),
        };

        cpc.reason = __get_cp_reason(sbi);
gc_more:
        segno = NULL_SEGNO;

        if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE)))
                goto stop;
        if (unlikely(f2fs_cp_error(sbi))) {
                ret = -EIO;
                goto stop;
        }

        if (gc_type == BG_GC && has_not_enough_free_secs(sbi, sec_freed, 0)) {
                gc_type = FG_GC;
                /*
                 * If there is no victim and no prefree segment but still not
                 * enough free sections, we should flush dent/node blocks and do
                 * garbage collections.
                 */
                if (__get_victim(sbi, &segno, gc_type) ||
                                                prefree_segments(sbi)) {
                        ret = write_checkpoint(sbi, &cpc);
                        if (ret)
                                goto stop;
                        segno = NULL_SEGNO;
                } else if (has_not_enough_free_secs(sbi, 0, 0)) {
                        ret = write_checkpoint(sbi, &cpc);
                        if (ret)
                                goto stop;
                }
        } else if (gc_type == BG_GC && !background) {
                /* f2fs_balance_fs doesn't need to do BG_GC in critical path. */
                goto stop;
        }

        if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
                goto stop;
        ret = 0;

        if (do_garbage_collect(sbi, segno, &gc_list, gc_type) &&
                        gc_type == FG_GC)
                sec_freed++;

        if (gc_type == FG_GC)
                sbi->cur_victim_sec = NULL_SEGNO;

        if (!sync) {
                if (has_not_enough_free_secs(sbi, sec_freed, 0))
                        goto gc_more;

                if (gc_type == FG_GC)
                        ret = write_checkpoint(sbi, &cpc);
        }
stop:
        mutex_unlock(&sbi->gc_mutex);

        put_gc_inode(&gc_list);

        if (sync)
                ret = sec_freed ? 0 : -EAGAIN;
        return ret;
}

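/*
 * fggc_threshold caps how full a section may be and still qualify as an
 * FG_GC victim (see no_fggc_candidate()). It scales the per-section block
 * count by (main - overprovision) / (main - reserved), so sections holding
 * more valid blocks than that ratio allows are skipped as victims.
 */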
void build_gc_manager(struct f2fs_sb_info *sbi)
{
        u64 main_count, resv_count, ovp_count, blocks_per_sec;

        DIRTY_I(sbi)->v_ops = &default_v_ops;

        /* threshold of # of valid blocks in a section for victims of FG_GC */
        main_count = SM_I(sbi)->main_segments << sbi->log_blocks_per_seg;
        resv_count = SM_I(sbi)->reserved_segments << sbi->log_blocks_per_seg;
        ovp_count = SM_I(sbi)->ovp_segments << sbi->log_blocks_per_seg;
        blocks_per_sec = sbi->blocks_per_seg * sbi->segs_per_sec;

        sbi->fggc_threshold = div_u64((main_count - ovp_count) * blocks_per_sec,
                                        (main_count - resv_count));
}
1005 }