/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;
	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;
	if ((word & 0x2) == 0)
		num += 1;
	return num;
}

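/*
 * Worked example (illustrative, not from the original source): with the
 * reversed in-byte ordering, f2fs bit 5 of a byte corresponds to native
 * mask 0x04.  Tracing __reverse_ffs(0x04): (word & 0xf0) == 0 adds 4,
 * (word & 0xc) != 0 shifts word down to 0x1, and (word & 0x2) == 0 adds 1,
 * returning 5.  Note the nibble test uses 0xf0 rather than 0x0f precisely
 * because bit 0 of each byte lives at the native MSB position.
 */
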
/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * Example:
 *                             LSB <--> MSB
 *   f2fs_set_bit(0, bitmap) => 0000 0001
 *   f2fs_set_bit(7, bitmap) => 1000 0000
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~0UL << quot;
	submask = (unsigned char)(0xff << rest) >> rest;
	submask <<= quot;
	mask &= submask;
	tmp &= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG-1)) {
		tmp = *(p++);
		if (tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffs(tmp);
}

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;
	unsigned long mask, submask;
	unsigned long quot, rest;

	if (offset >= size)
		return size;

	size -= result;
	offset %= BITS_PER_LONG;
	if (!offset)
		goto aligned;

	tmp = *(p++);
	quot = (offset >> 3) << 3;
	rest = offset & 0x7;
	mask = ~(~0UL << quot);
	submask = (unsigned char)~((unsigned char)(0xff << rest) >> rest);
	submask <<= quot;
	mask += submask;
	tmp |= mask;
	if (size < BITS_PER_LONG)
		goto found_first;
	if (~tmp)
		goto found_middle;

	size -= BITS_PER_LONG;
	result += BITS_PER_LONG;
aligned:
	while (size & ~(BITS_PER_LONG - 1)) {
		tmp = *(p++);
		if (~tmp)
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + __reverse_ffz(tmp);
}

void register_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *new;
	int err;

	SetPagePrivate(page);
	f2fs_trace_pid(page);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);
retry:
	/* increase reference count with clean state */
	mutex_lock(&fi->inmem_lock);
	err = radix_tree_insert(&fi->inmem_root, page->index, new);
	if (err == -EEXIST) {
		mutex_unlock(&fi->inmem_lock);
		kmem_cache_free(inmem_entry_slab, new);
		return;
	} else if (err) {
		mutex_unlock(&fi->inmem_lock);
		goto retry;
	}
	get_page(page);
	list_add_tail(&new->list, &fi->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&fi->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}

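/*
 * Pages registered above back f2fs atomic writes: they are pinned in memory
 * (with an extra page reference) and tracked per-inode in both a list and a
 * radix tree, then written out in one batch by commit_inmem_pages() below.
 */
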
void commit_inmem_pages(struct inode *inode, bool abort)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	bool submit_bio = false;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};

	/*
	 * abort is true only when f2fs_evict_inode is called.
	 * f2fs_evict_inode doesn't produce any data writes, so we don't
	 * need to call f2fs_balance_fs; otherwise, f2fs_gc in
	 * f2fs_balance_fs could wait forever until this inode is freed
	 * by iget_locked in f2fs_iget.
	 */
	if (!abort) {
		f2fs_balance_fs(sbi);
		f2fs_lock_op(sbi);
	}

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		if (!abort) {
			lock_page(cur->page);
			if (cur->page->mapping == inode->i_mapping) {
				f2fs_wait_on_page_writeback(cur->page, DATA);
				if (clear_page_dirty_for_io(cur->page))
					inode_dec_dirty_pages(inode);
				trace_f2fs_commit_inmem_page(cur->page, INMEM);
				fio.page = cur->page;
				do_write_data_page(&fio);
				submit_bio = true;
			}
			f2fs_put_page(cur->page, 1);
		} else {
			trace_f2fs_commit_inmem_page(cur->page, INMEM_DROP);
			put_page(cur->page);
		}
		radix_tree_delete(&fi->inmem_root, cur->page->index);
		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	mutex_unlock(&fi->inmem_lock);

	if (!abort) {
		f2fs_unlock_op(sbi);
		if (submit_bio)
			f2fs_submit_merged_bio(sbi, DATA, WRITE);
	}
}

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi)
{
	/*
	 * We should do GC, or end up with a checkpoint, if there are too
	 * many dirty dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0)) {
		mutex_lock(&sbi->gc_mutex);
		f2fs_gc(sbi);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
{
	/* try to shrink extent cache when there is not enough memory */
	f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries and prefree segments */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK) ||
			excess_prefree_segs(sbi) ||
			!available_free_memory(sbi, INO_ENTRIES))
		f2fs_sync_fs(sbi->sb, true);
}

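/*
 * FLUSH_MERGE design: rather than having every fsync issue its own cache
 * flush, callers queue a flush_cmd on a lock-free llist and a single
 * dedicated kthread submits one WRITE_FLUSH bio for the whole batch,
 * completing every waiter with the same result.  This collapses a burst
 * of concurrent fsyncs into one device flush.
 */
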
static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	if (!llist_empty(&fcc->issue_list)) {
		struct bio *bio = bio_alloc(GFP_NOIO, 0);
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		bio->bi_bdev = sbi->sb->s_bdev;
		ret = submit_bio_wait(WRITE_FLUSH, bio);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		bio_put(bio);
		fcc->dispatch_list = NULL;
	}

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}

int f2fs_issue_flush(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;
	struct flush_cmd cmd;

	trace_f2fs_issue_flush(sbi->sb, test_opt(sbi, NOBARRIER),
					test_opt(sbi, FLUSH_MERGE));

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE))
		return blkdev_issue_flush(sbi->sb->s_bdev, GFP_KERNEL, NULL);

	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	if (!fcc->dispatch_list)
		wake_up(&fcc->flush_wait_queue);

	wait_for_completion(&cmd.wait);

	return cmd.ret;
}

int create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->cmd_control_info = fcc;
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kfree(fcc);
		SM_I(sbi)->cmd_control_info = NULL;
		return err;
	}

	return err;
}

void destroy_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->cmd_control_info;

	if (fcc && fcc->f2fs_issue_flush)
		kthread_stop(fcc->f2fs_issue_flush);
	kfree(fcc);
	SM_I(sbi)->cmd_control_info = NULL;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
		enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, sbi->segs_per_sec) == 0)
			clear_bit(GET_SECNO(sbi, segno),
						dirty_i->victim_secmap);
	}
}

/*
 * No error (such as -ENOMEM) should occur here: adding a dirty entry to
 * the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, 0);

	if (valid_blocks == 0) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
				block_t blkstart, block_t blklen)
{
	sector_t start = SECTOR_FROM_BLOCK(blkstart);
	sector_t len = SECTOR_FROM_BLOCK(blklen);
	struct seg_entry *se;
	unsigned int offset;
	block_t i;

	for (i = blkstart; i < blkstart + blklen; i++) {
		se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
		offset = GET_BLKOFF_FROM_SEG0(sbi, i);

		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	}
	trace_f2fs_issue_discard(sbi->sb, blkstart, blklen);
	return blkdev_issue_discard(sbi->sb->s_bdev, start, len, GFP_NOFS, 0);
}

void discard_next_dnode(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	if (f2fs_issue_discard(sbi, blkaddr, 1)) {
		struct page *page = grab_meta_page(sbi, blkaddr);
		/* zero-filled page */
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

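/*
 * The helpers below collect per-segment discard candidates during a
 * checkpoint.  A range that abuts the tail of the last queued entry is
 * merged in place, so a run of adjacent candidate blocks costs only one
 * discard_entry (and ultimately one discard command).
 */
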
static void __add_discard_entry(struct f2fs_sb_info *sbi,
			struct cp_control *cpc, struct seg_entry *se,
			unsigned int start, unsigned int end)
{
	struct list_head *head = &SM_I(sbi)->discard_list;
	struct discard_entry *new, *last;

	if (!list_empty(head)) {
		last = list_last_entry(head, struct discard_entry, list);
		if (START_BLOCK(sbi, cpc->trim_start) + start ==
						last->blkaddr + last->len) {
			last->len += end - start;
			goto done;
		}
	}

	new = f2fs_kmem_cache_alloc(discard_entry_slab, GFP_NOFS);
	INIT_LIST_HEAD(&new->list);
	new->blkaddr = START_BLOCK(sbi, cpc->trim_start) + start;
	new->len = end - start;
	list_add_tail(&new->list, head);
done:
	SM_I(sbi)->nr_discards += end - start;
	cpc->trimmed += end - start;
}

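/*
 * Candidate selection for the dmap below: in normal checkpoints a block is
 * discardable when it was valid at the last checkpoint but has been freed
 * since, i.e. (cur_map ^ ckpt_map) & ckpt_map.  Under FITRIM (CP_DISCARD),
 * every block that is neither checkpointed-valid nor already discarded,
 * ~ckpt_map & ~discard_map, is eligible instead.
 */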
static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	int max_blocks = sbi->blocks_per_seg;
	struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *discard_map = (unsigned long *)se->discard_map;
	unsigned long *dmap = SIT_I(sbi)->tmp_map;
	unsigned int start = 0, end = -1;
	bool force = (cpc->reason == CP_DISCARD);
	int i;

	if (se->valid_blocks == max_blocks)
		return;

	if (!force) {
		if (!test_opt(sbi, DISCARD) || !se->valid_blocks ||
		    SM_I(sbi)->nr_discards >= SM_I(sbi)->max_discards)
			return;
	}

	/* SIT_VBLOCK_MAP_SIZE should be multiple of sizeof(unsigned long) */
	for (i = 0; i < entries; i++)
		dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
				(cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];

	while (force || SM_I(sbi)->nr_discards <= SM_I(sbi)->max_discards) {
		start = __find_rev_next_bit(dmap, max_blocks, end + 1);
		if (start >= max_blocks)
			break;

		end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);

		if (force && end - start < cpc->trim_minlen)
			continue;

		__add_discard_entry(sbi, cpc, se, start, end);
	}
}

void release_discard_addrs(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;

	/* drop caches */
	list_for_each_entry_safe(entry, this, head, list) {
		list_del(&entry->list);
		kmem_cache_free(discard_entry_slab, entry);
	}
}

/*
 * Should call clear_prefree_segments after checkpoint is done.
 */
static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
		__set_test_and_free(sbi, segno);
	mutex_unlock(&dirty_i->seglist_lock);
}

void clear_prefree_segments(struct f2fs_sb_info *sbi)
{
	struct list_head *head = &(SM_I(sbi)->discard_list);
	struct discard_entry *entry, *this;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
	unsigned int start = 0, end = -1;

	mutex_lock(&dirty_i->seglist_lock);

	while (1) {
		int i;
		start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
		if (start >= MAIN_SEGS(sbi))
			break;
		end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
								start + 1);

		for (i = start; i < end; i++)
			clear_bit(i, prefree_map);

		dirty_i->nr_dirty[PRE] -= end - start;

		if (!test_opt(sbi, DISCARD))
			continue;

		f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
				(end - start) << sbi->log_blocks_per_seg);
	}
	mutex_unlock(&dirty_i->seglist_lock);

	/* send small discards */
	list_for_each_entry_safe(entry, this, head, list) {
		f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
		list_del(&entry->list);
		SM_I(sbi)->nr_discards -= entry->len;
		kmem_cache_free(discard_entry_slab, entry);
	}
}

static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct sit_info *sit_i = SIT_I(sbi);

	if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
		sit_i->dirty_sentries++;
		return false;
	}

	return true;
}

static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
					unsigned int segno, int modified)
{
	struct seg_entry *se = get_seg_entry(sbi, segno);
	se->type = type;
	if (modified)
		__mark_sit_entry_dirty(sbi, segno);
}

static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
{
	struct seg_entry *se;
	unsigned int segno, offset;
	long int new_vblocks;

	segno = GET_SEGNO(sbi, blkaddr);

	se = get_seg_entry(sbi, segno);
	new_vblocks = se->valid_blocks + del;
	offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);

	f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
				(new_vblocks > sbi->blocks_per_seg)));

	se->valid_blocks = new_vblocks;
	se->mtime = get_mtime(sbi);
	SIT_I(sbi)->max_mtime = se->mtime;

	/* Update valid block bitmap */
	if (del > 0) {
		if (f2fs_test_and_set_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		if (!f2fs_test_and_set_bit(offset, se->discard_map))
			sbi->discard_blks--;
	} else {
		if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map))
			f2fs_bug_on(sbi, 1);
		if (f2fs_test_and_clear_bit(offset, se->discard_map))
			sbi->discard_blks++;
	}
	if (!f2fs_test_bit(offset, se->ckpt_valid_map))
		se->ckpt_valid_blocks += del;

	__mark_sit_entry_dirty(sbi, segno);

	/* update total number of valid blocks to be written in ckpt area */
	SIT_I(sbi)->written_valid_blocks += del;

	if (sbi->segs_per_sec > 1)
		get_sec_entry(sbi, segno)->valid_blocks += del;
}

void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new)
{
	update_sit_entry(sbi, new, 1);
	if (GET_SEGNO(sbi, old) != NULL_SEGNO)
		update_sit_entry(sbi, old, -1);

	locate_dirty_segment(sbi, GET_SEGNO(sbi, old));
	locate_dirty_segment(sbi, GET_SEGNO(sbi, new));
}

void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
{
	unsigned int segno = GET_SEGNO(sbi, addr);
	struct sit_info *sit_i = SIT_I(sbi);

	f2fs_bug_on(sbi, addr == NULL_ADDR);
	if (addr == NEW_ADDR)
		return;

	/* add it into sit main buffer */
	mutex_lock(&sit_i->sentry_lock);

	update_sit_entry(sbi, addr, -1);

	/* add it into dirty seglist */
	locate_dirty_segment(sbi, segno);

	mutex_unlock(&sit_i->sentry_lock);
}

/*
 * This function must be called with the curseg_mutex held.
 */
static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
					struct f2fs_summary *sum)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	void *addr = curseg->sum_blk;
	addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
	memcpy(addr, sum, sizeof(struct f2fs_summary));
}

/*
 * Calculate the number of current summary pages for writing
 */
int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
{
	int valid_sum_count = 0;
	int i, sum_in_page;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		if (sbi->ckpt->alloc_type[i] == SSR)
			valid_sum_count += sbi->blocks_per_seg;
		else {
			if (for_ra)
				valid_sum_count += le16_to_cpu(
					F2FS_CKPT(sbi)->cur_data_blkoff[i]);
			else
				valid_sum_count += curseg_blkoff(sbi, i);
		}
	}

	sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE -
			SUM_FOOTER_SIZE) / SUMMARY_SIZE;
	if (valid_sum_count <= sum_in_page)
		return 1;
	else if ((valid_sum_count - sum_in_page) <=
		(PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
		return 2;
	return 3;
}

/*
 * Caller should put this summary page
 */
struct page *get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
	return get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
}

static void write_sum_page(struct f2fs_sb_info *sbi,
			struct f2fs_summary_block *sum_blk, block_t blk_addr)
{
	struct page *page = grab_meta_page(sbi, blk_addr);
	void *kaddr = page_address(page);
	memcpy(kaddr, sum_blk, PAGE_CACHE_SIZE);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
}

static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno + 1;
	struct free_segmap_info *free_i = FREE_I(sbi);

	if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
		return !test_bit(segno, free_i->free_segmap);
	return 0;
}

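/*
 * The free-segment search below starts from a hint derived from the
 * current segment: it first prefers the rest of the current section,
 * then a free section found by scanning the secmap right (or left for
 * ALLOC_LEFT), and finally retries in another zone when the chosen zone
 * is already occupied by one of the active logs.
 */
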
/*
 * Find a new segment from the free segments bitmap, searching in the
 * requested allocation order.
 * This function must succeed; failure here is a BUG.
 */
static void get_new_segment(struct f2fs_sb_info *sbi,
			unsigned int *newseg, bool new_sec, int dir)
{
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno, secno, zoneno;
	unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
	unsigned int hint = *newseg / sbi->segs_per_sec;
	unsigned int old_zoneno = GET_ZONENO_FROM_SEGNO(sbi, *newseg);
	unsigned int left_start = hint;
	bool init = true;
	int go_left = 0;
	int i;

	spin_lock(&free_i->segmap_lock);

	if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
		segno = find_next_zero_bit(free_i->free_segmap,
					MAIN_SEGS(sbi), *newseg + 1);
		if (segno - *newseg < sbi->segs_per_sec -
					(*newseg % sbi->segs_per_sec))
			goto got_it;
	}
find_other_zone:
	secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
	if (secno >= MAIN_SECS(sbi)) {
		if (dir == ALLOC_RIGHT) {
			secno = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
			f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
		} else {
			go_left = 1;
			left_start = hint - 1;
		}
	}
	if (go_left == 0)
		goto skip_left;

	while (test_bit(left_start, free_i->free_secmap)) {
		if (left_start > 0) {
			left_start--;
			continue;
		}
		left_start = find_next_zero_bit(free_i->free_secmap,
							MAIN_SECS(sbi), 0);
		f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
		break;
	}
	secno = left_start;
skip_left:
	hint = secno;
	segno = secno * sbi->segs_per_sec;
	zoneno = secno / sbi->secs_per_zone;

	/* give up on finding another zone */
	if (!init)
		goto got_it;
	if (sbi->secs_per_zone == 1)
		goto got_it;
	if (zoneno == old_zoneno)
		goto got_it;
	if (dir == ALLOC_LEFT) {
		if (!go_left && zoneno + 1 >= total_zones)
			goto got_it;
		if (go_left && zoneno == 0)
			goto got_it;
	}
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		if (CURSEG_I(sbi, i)->zone == zoneno)
			break;

	if (i < NR_CURSEG_TYPE) {
		/* zone is in use, try another */
		if (go_left)
			hint = zoneno * sbi->secs_per_zone - 1;
		else if (zoneno + 1 >= total_zones)
			hint = 0;
		else
			hint = (zoneno + 1) * sbi->secs_per_zone;
		init = false;
		goto find_other_zone;
	}
got_it:
	/* set it as dirty segment in free segmap */
	f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
	__set_inuse(sbi, segno);
	*newseg = segno;
	spin_unlock(&free_i->segmap_lock);
}

static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	struct summary_footer *sum_footer;

	curseg->segno = curseg->next_segno;
	curseg->zone = GET_ZONENO_FROM_SEGNO(sbi, curseg->segno);
	curseg->next_blkoff = 0;
	curseg->next_segno = NULL_SEGNO;

	sum_footer = &(curseg->sum_blk->footer);
	memset(sum_footer, 0, sizeof(struct summary_footer));
	if (IS_DATASEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
	if (IS_NODESEG(type))
		SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
	__set_sit_entry_type(sbi, type, curseg->segno, modified);
}

/*
 * Allocate a current working segment.
 * This function always allocates a free segment in LFS manner.
 */
static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int segno = curseg->segno;
	int dir = ALLOC_LEFT;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, segno));
	if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
		dir = ALLOC_RIGHT;

	if (test_opt(sbi, NOHEAP))
		dir = ALLOC_RIGHT;

	get_new_segment(sbi, &segno, new_sec, dir);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 1);
	curseg->alloc_type = LFS;
}

static void __next_free_blkoff(struct f2fs_sb_info *sbi,
			struct curseg_info *seg, block_t start)
{
	struct seg_entry *se = get_seg_entry(sbi, seg->segno);
	int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
	unsigned long *target_map = SIT_I(sbi)->tmp_map;
	unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
	unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
	int i, pos;

	for (i = 0; i < entries; i++)
		target_map[i] = ckpt_map[i] | cur_map[i];

	pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);

	seg->next_blkoff = pos;
}

/*
 * If a segment is written in LFS manner, the next block offset is simply
 * obtained by increasing the current block offset. However, if a segment
 * is written in SSR manner, the next block offset is obtained by calling
 * __next_free_blkoff.
 */
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
				struct curseg_info *seg)
{
	if (seg->alloc_type == SSR)
		__next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
	else
		seg->next_blkoff++;
}

/*
 * This function always allocates a used segment (from the dirty seglist)
 * in SSR manner, so it must recover the existing segment information of
 * valid blocks.
 */
static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int new_segno = curseg->next_segno;
	struct f2fs_summary_block *sum_node;
	struct page *sum_page;

	write_sum_page(sbi, curseg->sum_blk,
				GET_SUM_BLOCK(sbi, curseg->segno));
	__set_test_and_inuse(sbi, new_segno);

	mutex_lock(&dirty_i->seglist_lock);
	__remove_dirty_segment(sbi, new_segno, PRE);
	__remove_dirty_segment(sbi, new_segno, DIRTY);
	mutex_unlock(&dirty_i->seglist_lock);

	reset_curseg(sbi, type, 1);
	curseg->alloc_type = SSR;
	__next_free_blkoff(sbi, curseg, 0);

	if (reuse) {
		sum_page = get_sum_page(sbi, new_segno);
		sum_node = (struct f2fs_summary_block *)page_address(sum_page);
		memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
		f2fs_put_page(sum_page, 1);
	}
}

static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;

	if (IS_NODESEG(type) || !has_not_enough_free_secs(sbi, 0))
		return v_ops->get_victim(sbi,
				&(curseg)->next_segno, BG_GC, type, SSR);

	/* For data segments, let's do SSR more intensively */
	for (; type >= CURSEG_HOT_DATA; type--)
		if (v_ops->get_victim(sbi, &(curseg)->next_segno,
						BG_GC, type, SSR))
			return 1;
	return 0;
}

/*
 * Flush out the current segment and replace it with a new segment.
 * This function must succeed; failure here is a BUG.
 */
static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
						int type, bool force)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);

	if (force)
		new_curseg(sbi, type, true);
	else if (type == CURSEG_WARM_NODE)
		new_curseg(sbi, type, false);
	else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
		new_curseg(sbi, type, false);
	else if (need_SSR(sbi) && get_ssr_segment(sbi, type))
		change_curseg(sbi, type, true);
	else
		new_curseg(sbi, type, false);

	stat_inc_seg_type(sbi, curseg);
}

static void __allocate_new_segments(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	unsigned int old_segno;

	old_segno = curseg->segno;
	SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true);
	locate_dirty_segment(sbi, old_segno);
}

void allocate_new_segments(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
		__allocate_new_segments(sbi, i);
}

static const struct segment_allocation default_salloc_ops = {
	.allocate_segment = allocate_segment_by_default,
};

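/*
 * f2fs_trim_fs() below services FITRIM.  It walks the requested range in
 * batches of up to BATCHED_TRIM_SEGMENTS and triggers a CP_DISCARD
 * checkpoint per batch, so discards are only issued against on-disk state
 * that a checkpoint has already made stable.
 */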
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
{
	__u64 start = F2FS_BYTES_TO_BLK(range->start);
	__u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
	unsigned int start_segno, end_segno;
	struct cp_control cpc;

	if (range->minlen > SEGMENT_SIZE(sbi) || start >= MAX_BLKADDR(sbi) ||
					range->len < sbi->blocksize)
		return -EINVAL;

	cpc.trimmed = 0;
	if (end <= MAIN_BLKADDR(sbi))
		goto out;

	/* start/end segment number in main_area */
	start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
	end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
						GET_SEGNO(sbi, end);
	cpc.reason = CP_DISCARD;
	cpc.trim_minlen = F2FS_BYTES_TO_BLK(range->minlen);

	/* do checkpoint to issue discard commands safely */
	for (; start_segno <= end_segno; start_segno = cpc.trim_end + 1) {
		cpc.trim_start = start_segno;

		if (sbi->discard_blks == 0)
			break;
		else if (sbi->discard_blks < BATCHED_TRIM_BLOCKS(sbi))
			cpc.trim_end = end_segno;
		else
			cpc.trim_end = min_t(unsigned int,
				rounddown(start_segno +
				BATCHED_TRIM_SEGMENTS(sbi),
				sbi->segs_per_sec) - 1, end_segno);

		mutex_lock(&sbi->gc_mutex);
		write_checkpoint(sbi, &cpc);
		mutex_unlock(&sbi->gc_mutex);
	}
out:
	range->len = F2FS_BLK_TO_BYTES(cpc.trimmed);
	return 0;
}

static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
	struct curseg_info *curseg = CURSEG_I(sbi, type);
	if (curseg->next_blkoff < sbi->blocks_per_seg)
		return true;
	return false;
}

static int __get_segment_type_2(struct page *page, enum page_type p_type)
{
	if (p_type == DATA)
		return CURSEG_HOT_DATA;
	else
		return CURSEG_HOT_NODE;
}

static int __get_segment_type_4(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else
			return CURSEG_COLD_DATA;
	} else {
		if (IS_DNODE(page) && is_cold_node(page))
			return CURSEG_WARM_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type_6(struct page *page, enum page_type p_type)
{
	if (p_type == DATA) {
		struct inode *inode = page->mapping->host;

		if (S_ISDIR(inode->i_mode))
			return CURSEG_HOT_DATA;
		else if (is_cold_data(page) || file_is_cold(inode))
			return CURSEG_COLD_DATA;
		else
			return CURSEG_WARM_DATA;
	} else {
		if (IS_DNODE(page))
			return is_cold_node(page) ? CURSEG_WARM_NODE :
						CURSEG_HOT_NODE;
		else
			return CURSEG_COLD_NODE;
	}
}

static int __get_segment_type(struct page *page, enum page_type p_type)
{
	switch (F2FS_P_SB(page)->active_logs) {
	case 2:
		return __get_segment_type_2(page, p_type);
	case 4:
		return __get_segment_type_4(page, p_type);
	}
	/* NR_CURSEG_TYPE(6) logs by default */
	f2fs_bug_on(F2FS_P_SB(page),
		F2FS_P_SB(page)->active_logs != NR_CURSEG_TYPE);
	return __get_segment_type_6(page, p_type);
}

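/*
 * Summary of the 6-log temperature policy above (illustrative):
 *
 *   data: dirent blocks -> HOT_DATA; cold-flagged data or cold files
 *         -> COLD_DATA; everything else -> WARM_DATA
 *   node: direct node blocks -> HOT_NODE (or WARM_NODE when the node is
 *         cold); indirect node blocks -> COLD_NODE
 *
 * With 4 or 2 active logs these classes collapse onto fewer segment types.
 */
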
void allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
		block_t old_blkaddr, block_t *new_blkaddr,
		struct f2fs_summary *sum, int type)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	bool direct_io = (type == CURSEG_DIRECT_IO);

	type = direct_io ? CURSEG_WARM_DATA : type;

	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	/* direct_io'ed data is aligned to the segment for better performance */
	if (direct_io && curseg->next_blkoff)
		__allocate_new_segments(sbi, type);

	*new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/*
	 * __add_sum_entry must be called with the curseg_mutex held,
	 * because this function updates a summary entry in the
	 * current summary block.
	 */
	__add_sum_entry(sbi, type, sum);

	__refresh_next_blkoff(sbi, curseg);

	stat_inc_block_count(sbi, curseg);

	if (!__has_curseg_space(sbi, type))
		sit_i->s_ops->allocate_segment(sbi, type, false);
	/*
	 * SIT information should be updated before segment allocation,
	 * since SSR needs latest valid block information.
	 */
	refresh_sit_entry(sbi, old_blkaddr, *new_blkaddr);

	mutex_unlock(&sit_i->sentry_lock);

	if (page && IS_NODESEG(type))
		fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));

	mutex_unlock(&curseg->curseg_mutex);
}

static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
	int type = __get_segment_type(fio->page, fio->type);

	allocate_data_block(fio->sbi, fio->page, fio->blk_addr,
					&fio->blk_addr, sum, type);

	/* writeout dirty page into bdev */
	f2fs_submit_page_mbio(fio);
}

void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.rw = WRITE_SYNC | REQ_META | REQ_PRIO,
		.blk_addr = page->index,
		.page = page,
	};

	set_page_writeback(page);
	f2fs_submit_page_mbio(&fio);
}

void write_node_page(unsigned int nid, struct f2fs_io_info *fio)
{
	struct f2fs_summary sum;

	set_summary(&sum, nid, 0, 0);
	do_write_page(&sum, fio);
}

void write_data_page(struct dnode_of_data *dn, struct f2fs_io_info *fio)
{
	struct f2fs_sb_info *sbi = fio->sbi;
	struct f2fs_summary sum;
	struct node_info ni;

	f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
	do_write_page(&sum, fio);
	dn->data_blkaddr = fio->blk_addr;
}

void rewrite_data_page(struct f2fs_io_info *fio)
{
	stat_inc_inplace_blocks(fio->sbi);
	f2fs_submit_page_mbio(fio);
}

void recover_data_page(struct f2fs_sb_info *sbi,
			struct page *page, struct f2fs_summary *sum,
			block_t old_blkaddr, block_t new_blkaddr)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg;
	unsigned int segno, old_cursegno;
	struct seg_entry *se;
	int type;

	segno = GET_SEGNO(sbi, new_blkaddr);
	se = get_seg_entry(sbi, segno);
	type = se->type;

	if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
		if (old_blkaddr == NULL_ADDR)
			type = CURSEG_COLD_DATA;
		else
			type = CURSEG_WARM_DATA;
	}
	curseg = CURSEG_I(sbi, type);

	mutex_lock(&curseg->curseg_mutex);
	mutex_lock(&sit_i->sentry_lock);

	old_cursegno = curseg->segno;

	/* change the current segment */
	if (segno != curseg->segno) {
		curseg->next_segno = segno;
		change_curseg(sbi, type, true);
	}

	curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
	__add_sum_entry(sbi, type, sum);

	refresh_sit_entry(sbi, old_blkaddr, new_blkaddr);
	locate_dirty_segment(sbi, old_cursegno);

	mutex_unlock(&sit_i->sentry_lock);
	mutex_unlock(&curseg->curseg_mutex);
}

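/*
 * A page can be "under writeback" while its bio is still only cached in
 * the per-type merged-bio buffer and has not reached the device.  The
 * wait helper below therefore first checks whether the page sits in the
 * cached bio and, if so, forces that bio out before sleeping in
 * wait_on_page_writeback(); otherwise the wait could block until
 * something else happens to flush that bio.
 */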
static inline bool is_merged_page(struct f2fs_sb_info *sbi,
					struct page *page, enum page_type type)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io = &sbi->write_io[btype];
	struct bio_vec *bvec;
	int i;

	down_read(&io->io_rwsem);
	if (!io->bio)
		goto out;

	bio_for_each_segment_all(bvec, io->bio, i) {
		if (page == bvec->bv_page) {
			up_read(&io->io_rwsem);
			return true;
		}
	}

out:
	up_read(&io->io_rwsem);
	return false;
}

void f2fs_wait_on_page_writeback(struct page *page,
				enum page_type type)
{
	if (PageWriteback(page)) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(page);

		if (is_merged_page(sbi, page, type))
			f2fs_submit_merged_bio(sbi, type, WRITE);
		wait_on_page_writeback(page);
	}
}

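/*
 * Two on-disk layouts exist for checkpointed summaries.  The compacted
 * form packs the NAT journal, the SIT journal, and the in-use data
 * summary entries back to back to save blocks (CP_COMPACT_SUM_FLAG);
 * the normal form keeps one full summary block per current segment.
 * The readers below restore the current segments from whichever form
 * the checkpoint used.
 */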
static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct curseg_info *seg_i;
	unsigned char *kaddr;
	struct page *page;
	block_t start;
	int i, j, offset;

	start = start_sum_block(sbi);

	page = get_meta_page(sbi, start++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: restore nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(&seg_i->sum_blk->n_nats, kaddr, SUM_JOURNAL_SIZE);

	/* Step 2: restore sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(&seg_i->sum_blk->n_sits, kaddr + SUM_JOURNAL_SIZE,
						SUM_JOURNAL_SIZE);
	offset = 2 * SUM_JOURNAL_SIZE;

	/* Step 3: restore summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blk_off;
		unsigned int segno;

		seg_i = CURSEG_I(sbi, i);
		segno = le32_to_cpu(ckpt->cur_data_segno[i]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
		seg_i->next_segno = segno;
		reset_curseg(sbi, i, 0);
		seg_i->alloc_type = ckpt->alloc_type[i];
		seg_i->next_blkoff = blk_off;

		if (seg_i->alloc_type == SSR)
			blk_off = sbi->blocks_per_seg;

		for (j = 0; j < blk_off; j++) {
			struct f2fs_summary *s;
			s = (struct f2fs_summary *)(kaddr + offset);
			seg_i->sum_blk->entries[j] = *s;
			offset += SUMMARY_SIZE;
			if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
						SUM_FOOTER_SIZE)
				continue;

			f2fs_put_page(page, 1);
			page = NULL;

			page = get_meta_page(sbi, start++);
			kaddr = (unsigned char *)page_address(page);
			offset = 0;
		}
	}
	f2fs_put_page(page, 1);
	return 0;
}

static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_summary_block *sum;
	struct curseg_info *curseg;
	struct page *new;
	unsigned short blk_off;
	unsigned int segno = 0;
	block_t blk_addr = 0;

	/* get segment number and block addr */
	if (IS_DATASEG(type)) {
		segno = le32_to_cpu(ckpt->cur_data_segno[type]);
		blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
							CURSEG_HOT_DATA]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
		else
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
	} else {
		segno = le32_to_cpu(ckpt->cur_node_segno[type -
							CURSEG_HOT_NODE]);
		blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
							CURSEG_HOT_NODE]);
		if (__exist_node_summaries(sbi))
			blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
							type - CURSEG_HOT_NODE);
		else
			blk_addr = GET_SUM_BLOCK(sbi, segno);
	}

	new = get_meta_page(sbi, blk_addr);
	sum = (struct f2fs_summary_block *)page_address(new);

	if (IS_NODESEG(type)) {
		if (__exist_node_summaries(sbi)) {
			struct f2fs_summary *ns = &sum->entries[0];
			int i;
			for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
				ns->version = 0;
				ns->ofs_in_node = 0;
			}
		} else {
			int err;

			err = restore_node_summary(sbi, segno, sum);
			if (err) {
				f2fs_put_page(new, 1);
				return err;
			}
		}
	}

	/* set uncompleted segment to curseg */
	curseg = CURSEG_I(sbi, type);
	mutex_lock(&curseg->curseg_mutex);
	memcpy(curseg->sum_blk, sum, PAGE_CACHE_SIZE);
	curseg->next_segno = segno;
	reset_curseg(sbi, type, 0);
	curseg->alloc_type = ckpt->alloc_type[type];
	curseg->next_blkoff = blk_off;
	mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(new, 1);
	return 0;
}

static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
{
	int type = CURSEG_HOT_DATA;
	int err;

	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG)) {
		int npages = npages_for_summary_flush(sbi, true);

		if (npages >= 2)
			ra_meta_pages(sbi, start_sum_block(sbi), npages,
								META_CP);

		/* restore for compacted data summary */
		if (read_compacted_summaries(sbi))
			return -EINVAL;
		type = CURSEG_HOT_NODE;
	}

	if (__exist_node_summaries(sbi))
		ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
					NR_CURSEG_TYPE - type, META_CP);

	for (; type <= CURSEG_COLD_NODE; type++) {
		err = read_normal_summaries(sbi, type);
		if (err)
			return err;
	}

	return 0;
}

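/*
 * The writer side of the compacted layout: NAT and SIT journals first,
 * then raw summary entries streamed page by page, grabbing a fresh meta
 * page whenever the next entry would run into the footer area.
 */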
static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct page *page;
	unsigned char *kaddr;
	struct f2fs_summary *summary;
	struct curseg_info *seg_i;
	int written_size = 0;
	int i, j;

	page = grab_meta_page(sbi, blkaddr++);
	kaddr = (unsigned char *)page_address(page);

	/* Step 1: write nat cache */
	seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
	memcpy(kaddr, &seg_i->sum_blk->n_nats, SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 2: write sit cache */
	seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
	memcpy(kaddr + written_size, &seg_i->sum_blk->n_sits,
						SUM_JOURNAL_SIZE);
	written_size += SUM_JOURNAL_SIZE;

	/* Step 3: write summary entries */
	for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
		unsigned short blkoff;
		seg_i = CURSEG_I(sbi, i);
		if (sbi->ckpt->alloc_type[i] == SSR)
			blkoff = sbi->blocks_per_seg;
		else
			blkoff = curseg_blkoff(sbi, i);

		for (j = 0; j < blkoff; j++) {
			if (!page) {
				page = grab_meta_page(sbi, blkaddr++);
				kaddr = (unsigned char *)page_address(page);
				written_size = 0;
			}
			summary = (struct f2fs_summary *)(kaddr + written_size);
			*summary = seg_i->sum_blk->entries[j];
			written_size += SUMMARY_SIZE;

			if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE -
							SUM_FOOTER_SIZE)
				continue;

			set_page_dirty(page);
			f2fs_put_page(page, 1);
			page = NULL;
		}
	}
	if (page) {
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}

static void write_normal_summaries(struct f2fs_sb_info *sbi,
					block_t blkaddr, int type)
{
	int i, end;
	if (IS_DATASEG(type))
		end = type + NR_CURSEG_DATA_TYPE;
	else
		end = type + NR_CURSEG_NODE_TYPE;

	for (i = type; i < end; i++) {
		struct curseg_info *sum = CURSEG_I(sbi, i);
		mutex_lock(&sum->curseg_mutex);
		write_sum_page(sbi, sum->sum_blk, blkaddr + (i - type));
		mutex_unlock(&sum->curseg_mutex);
	}
}

void write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_COMPACT_SUM_FLAG))
		write_compacted_summaries(sbi, start_blk);
	else
		write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
}

void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
	write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
}

int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
					unsigned int val, int alloc)
{
	int i;

	if (type == NAT_JOURNAL) {
		for (i = 0; i < nats_in_cursum(sum); i++) {
			if (le32_to_cpu(nid_in_journal(sum, i)) == val)
				return i;
		}
		if (alloc && nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES)
			return update_nats_in_cursum(sum, 1);
	} else if (type == SIT_JOURNAL) {
		for (i = 0; i < sits_in_cursum(sum); i++)
			if (le32_to_cpu(segno_in_journal(sum, i)) == val)
				return i;
		if (alloc && sits_in_cursum(sum) < SIT_JOURNAL_ENTRIES)
			return update_sits_in_cursum(sum, 1);
	}
	return -1;
}

static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
					unsigned int segno)
{
	return get_meta_page(sbi, current_sit_addr(sbi, segno));
}

static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
					unsigned int start)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct page *src_page, *dst_page;
	pgoff_t src_off, dst_off;
	void *src_addr, *dst_addr;

	src_off = current_sit_addr(sbi, start);
	dst_off = next_sit_addr(sbi, src_off);

	/* get current sit block page without lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);

	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_sit(sit_i, start);

	return dst_page;
}

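/*
 * Note that each SIT block has two on-disk locations and updates alternate
 * between them (see the next_sit_addr() usage above), so a crash always
 * leaves one consistent copy.  The sit_entry_set machinery below groups
 * dirty SIT entries by the SIT block that holds them, keeping the list
 * ordered by entry count so the flush path can decide per set whether the
 * journal still has room.
 */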
static struct sit_entry_set *grab_sit_entry_set(void)
{
	struct sit_entry_set *ses =
			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);

	ses->entry_cnt = 0;
	INIT_LIST_HEAD(&ses->set_list);
	return ses;
}

static void release_sit_entry_set(struct sit_entry_set *ses)
{
	list_del(&ses->set_list);
	kmem_cache_free(sit_entry_set_slab, ses);
}

static void adjust_sit_entry_set(struct sit_entry_set *ses,
						struct list_head *head)
{
	struct sit_entry_set *next = ses;

	if (list_is_last(&ses->set_list, head))
		return;

	list_for_each_entry_continue(next, head, set_list)
		if (ses->entry_cnt <= next->entry_cnt)
			break;

	list_move_tail(&ses->set_list, &next->set_list);
}

static void add_sit_entry(unsigned int segno, struct list_head *head)
{
	struct sit_entry_set *ses;
	unsigned int start_segno = START_SEGNO(segno);

	list_for_each_entry(ses, head, set_list) {
		if (ses->start_segno == start_segno) {
			ses->entry_cnt++;
			adjust_sit_entry_set(ses, head);
			return;
		}
	}

	ses = grab_sit_entry_set();

	ses->start_segno = start_segno;
	ses->entry_cnt++;
	list_add(&ses->set_list, head);
}

static void add_sits_in_set(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);
	struct list_head *set_list = &sm_info->sit_entry_set;
	unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
	unsigned int segno;

	for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
		add_sit_entry(segno, set_list);
}

static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	for (i = sits_in_cursum(sum) - 1; i >= 0; i--) {
		unsigned int segno;
		bool dirtied;

		segno = le32_to_cpu(segno_in_journal(sum, i));
		dirtied = __mark_sit_entry_dirty(sbi, segno);

		if (!dirtied)
			add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
	}
	update_sits_in_cursum(sum, -sits_in_cursum(sum));
}

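/*
 * During the flush below, each sit_entry_set is written to the journal in
 * the cold data summary block while space remains; once a set no longer
 * fits, to_journal flips and all remaining sets go to their SIT pages
 * proper.  CP_DISCARD checkpoints skip collecting discard candidates here
 * and instead handle the trim range at the end.
 */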
0a8165d7 1730/*
351df4b2
JK
1731 * CP calls this function, which flushes SIT entries including sit_journal,
1732 * and moves prefree segs to free segs.
1733 */
4b2fecc8 1734void flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
351df4b2
JK
1735{
1736 struct sit_info *sit_i = SIT_I(sbi);
1737 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
1738 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
1739 struct f2fs_summary_block *sum = curseg->sum_blk;
184a5cd2
CY
1740 struct sit_entry_set *ses, *tmp;
1741 struct list_head *head = &SM_I(sbi)->sit_entry_set;
184a5cd2 1742 bool to_journal = true;
4b2fecc8 1743 struct seg_entry *se;
351df4b2
JK
1744
1745 mutex_lock(&curseg->curseg_mutex);
1746 mutex_lock(&sit_i->sentry_lock);
1747
2b11a74b
WL
1748 if (!sit_i->dirty_sentries)
1749 goto out;
1750
351df4b2 1751 /*
184a5cd2
CY
1752 * add and account sit entries of dirty bitmap in sit entry
1753 * set temporarily
351df4b2 1754 */
184a5cd2 1755 add_sits_in_set(sbi);
351df4b2 1756
184a5cd2
CY
1757 /*
1758 * if there are no enough space in journal to store dirty sit
1759 * entries, remove all entries from journal and add and account
1760 * them in sit entry set.
1761 */
1762 if (!__has_cursum_space(sum, sit_i->dirty_sentries, SIT_JOURNAL))
1763 remove_sits_in_journal(sbi);
b2955550 1764
184a5cd2
CY
1765 /*
1766 * there are two steps to flush sit entries:
1767 * #1, flush sit entries to journal in current cold data summary block.
1768 * #2, flush sit entries to sit page.
1769 */
1770 list_for_each_entry_safe(ses, tmp, head, set_list) {
4a257ed6 1771 struct page *page = NULL;
184a5cd2
CY
1772 struct f2fs_sit_block *raw_sit = NULL;
1773 unsigned int start_segno = ses->start_segno;
1774 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
7cd8558b 1775 (unsigned long)MAIN_SEGS(sbi));
184a5cd2
CY
1776 unsigned int segno = start_segno;
1777
1778 if (to_journal &&
1779 !__has_cursum_space(sum, ses->entry_cnt, SIT_JOURNAL))
1780 to_journal = false;
1781
1782 if (!to_journal) {
1783 page = get_next_sit_page(sbi, start_segno);
1784 raw_sit = page_address(page);
351df4b2 1785 }
351df4b2 1786
184a5cd2
CY
1787 /* flush dirty sit entries in region of current sit set */
1788 for_each_set_bit_from(segno, bitmap, end) {
1789 int offset, sit_offset;
4b2fecc8
JK
1790
1791 se = get_seg_entry(sbi, segno);
184a5cd2
CY
1792
1793 /* add discard candidates */
d7bc2484 1794 if (cpc->reason != CP_DISCARD) {
4b2fecc8
JK
1795 cpc->trim_start = segno;
1796 add_discard_addrs(sbi, cpc);
1797 }
184a5cd2
CY
1798
1799 if (to_journal) {
1800 offset = lookup_journal_in_cursum(sum,
1801 SIT_JOURNAL, segno, 1);
1802 f2fs_bug_on(sbi, offset < 0);
1803 segno_in_journal(sum, offset) =
1804 cpu_to_le32(segno);
1805 seg_info_to_raw_sit(se,
1806 &sit_in_journal(sum, offset));
1807 } else {
1808 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
1809 seg_info_to_raw_sit(se,
1810 &raw_sit->entries[sit_offset]);
1811 }
351df4b2 1812
184a5cd2
CY
1813 __clear_bit(segno, bitmap);
1814 sit_i->dirty_sentries--;
1815 ses->entry_cnt--;
351df4b2
JK
1816 }
1817
184a5cd2
CY
1818 if (!to_journal)
1819 f2fs_put_page(page, 1);
1820
1821 f2fs_bug_on(sbi, ses->entry_cnt);
1822 release_sit_entry_set(ses);
351df4b2 1823 }
184a5cd2
CY
1824
1825 f2fs_bug_on(sbi, !list_empty(head));
1826 f2fs_bug_on(sbi, sit_i->dirty_sentries);
184a5cd2 1827out:
4b2fecc8
JK
1828 if (cpc->reason == CP_DISCARD) {
1829 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
1830 add_discard_addrs(sbi, cpc);
1831 }
351df4b2
JK
1832 mutex_unlock(&sit_i->sentry_lock);
1833 mutex_unlock(&curseg->curseg_mutex);
1834
351df4b2
JK
1835 set_prefree_as_free_segments(sbi);
1836}
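
/*
 * Flush strategy above: sets whose entries still fit in the journal of
 * the cold data summary are written there (step #1, piggybacked on the
 * checkpoint, no extra SIT page I/O).  Once one set does not fit,
 * to_journal stays false and every remaining set goes to its SIT page
 * (step #2).  For a CP_DISCARD checkpoint (fstrim), per-segment discard
 * collection is skipped inside the loop and instead done once at "out:"
 * for the whole [trim_start, trim_end] range.
 */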

static int build_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct sit_info *sit_i;
	unsigned int sit_segs, start;
	char *src_bitmap, *dst_bitmap;
	unsigned int bitmap_size;

	/* allocate memory for SIT information */
	sit_i = kzalloc(sizeof(struct sit_info), GFP_KERNEL);
	if (!sit_i)
		return -ENOMEM;

	SM_I(sbi)->sit_info = sit_i;

	sit_i->sentries = vzalloc(MAIN_SEGS(sbi) * sizeof(struct seg_entry));
	if (!sit_i->sentries)
		return -ENOMEM;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	sit_i->dirty_sentries_bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!sit_i->dirty_sentries_bitmap)
		return -ENOMEM;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		sit_i->sentries[start].cur_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].ckpt_valid_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		sit_i->sentries[start].discard_map
			= kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
		if (!sit_i->sentries[start].cur_valid_map ||
				!sit_i->sentries[start].ckpt_valid_map ||
				!sit_i->sentries[start].discard_map)
			return -ENOMEM;
	}

	sit_i->tmp_map = kzalloc(SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
	if (!sit_i->tmp_map)
		return -ENOMEM;

	if (sbi->segs_per_sec > 1) {
		sit_i->sec_entries = vzalloc(MAIN_SECS(sbi) *
					sizeof(struct sec_entry));
		if (!sit_i->sec_entries)
			return -ENOMEM;
	}

	/* get information related with SIT */
	sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;

	/* setup SIT bitmap from checkpoint pack */
	bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
	src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);

	dst_bitmap = kmemdup(src_bitmap, bitmap_size, GFP_KERNEL);
	if (!dst_bitmap)
		return -ENOMEM;

	/* init SIT information */
	sit_i->s_ops = &default_salloc_ops;

	sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
	sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
	sit_i->written_valid_blocks = le64_to_cpu(ckpt->valid_block_count);
	sit_i->sit_bitmap = dst_bitmap;
	sit_i->bitmap_size = bitmap_size;
	sit_i->dirty_sentries = 0;
	sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
	sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
	sit_i->mounted_time = CURRENT_TIME_SEC.tv_sec;
	mutex_init(&sit_i->sentry_lock);
	return 0;
}
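
/*
 * Sizing note: cur_valid_map, ckpt_valid_map and discard_map are each
 * SIT_VBLOCK_MAP_SIZE (64) bytes, i.e. one bit per block of a default
 * 2MB segment (512 x 4KB blocks).  With that default geometry the three
 * maps cost 192 bytes per segment, roughly 96KB per GB of main area, on
 * top of the seg_entry array itself.
 */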

static int build_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i;
	unsigned int bitmap_size, sec_bitmap_size;

	/* allocate memory for free segmap information */
	free_i = kzalloc(sizeof(struct free_segmap_info), GFP_KERNEL);
	if (!free_i)
		return -ENOMEM;

	SM_I(sbi)->free_info = free_i;

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
	free_i->free_segmap = kmalloc(bitmap_size, GFP_KERNEL);
	if (!free_i->free_segmap)
		return -ENOMEM;

	sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
	free_i->free_secmap = kmalloc(sec_bitmap_size, GFP_KERNEL);
	if (!free_i->free_secmap)
		return -ENOMEM;

	/* mark all segments and sections as in-use temporarily */
	memset(free_i->free_segmap, 0xff, bitmap_size);
	memset(free_i->free_secmap, 0xff, sec_bitmap_size);

	/* init free segmap information */
	free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
	free_i->free_segments = 0;
	free_i->free_sections = 0;
	spin_lock_init(&free_i->segmap_lock);
	return 0;
}

static int build_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array;
	int i;

	array = kcalloc(NR_CURSEG_TYPE, sizeof(*array), GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	SM_I(sbi)->curseg_array = array;

	for (i = 0; i < NR_CURSEG_TYPE; i++) {
		mutex_init(&array[i].curseg_mutex);
		array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL);
		if (!array[i].sum_blk)
			return -ENOMEM;
		array[i].segno = NULL_SEGNO;
		array[i].next_blkoff = 0;
	}
	return restore_curseg_summaries(sbi);
}
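
/*
 * f2fs keeps six active logs (NR_CURSEG_TYPE): {HOT,WARM,COLD} x
 * {DATA,NODE}.  Each curseg caches its summary block in the page-sized
 * buffer allocated above; restore_curseg_summaries() then refills the
 * cached summaries, segno and next_blkoff from the checkpoint pack.
 */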

static void build_sit_entries(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int sit_blk_cnt = SIT_BLK_CNT(sbi);
	unsigned int i, start, end;
	unsigned int readed, start_blk = 0;
	int nrpages = MAX_BIO_BLOCKS(sbi);

	do {
		readed = ra_meta_pages(sbi, start_blk, nrpages, META_SIT);

		start = start_blk * sit_i->sents_per_block;
		end = (start_blk + readed) * sit_i->sents_per_block;

		for (; start < end && start < MAIN_SEGS(sbi); start++) {
			struct seg_entry *se = &sit_i->sentries[start];
			struct f2fs_sit_block *sit_blk;
			struct f2fs_sit_entry sit;
			struct page *page;

			mutex_lock(&curseg->curseg_mutex);
			for (i = 0; i < sits_in_cursum(sum); i++) {
				if (le32_to_cpu(segno_in_journal(sum, i))
								== start) {
					sit = sit_in_journal(sum, i);
					mutex_unlock(&curseg->curseg_mutex);
					goto got_it;
				}
			}
			mutex_unlock(&curseg->curseg_mutex);

			page = get_current_sit_page(sbi, start);
			sit_blk = (struct f2fs_sit_block *)page_address(page);
			sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
			f2fs_put_page(page, 1);
got_it:
			check_block_count(sbi, start, &sit);
			seg_info_from_raw_sit(se, &sit);

			/* build discard map only one time */
			memcpy(se->discard_map, se->cur_valid_map,
						SIT_VBLOCK_MAP_SIZE);
			sbi->discard_blks += sbi->blocks_per_seg -
						se->valid_blocks;

			if (sbi->segs_per_sec > 1) {
				struct sec_entry *e = get_sec_entry(sbi, start);
				e->valid_blocks += se->valid_blocks;
			}
		}
		start_blk += readed;
	} while (start_blk < sit_blk_cnt);
}
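
/*
 * During this mount-time scan the SIT journal in the cold data summary
 * may hold a newer copy of an entry than the SIT block does, so the
 * journal is consulted first and the on-disk SIT page is read only on a
 * journal miss.  Seeding discard_map from cur_valid_map means blocks
 * that are currently valid are never treated as discardable, while
 * sbi->discard_blks counts the invalid blocks that remain candidates.
 */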

static void init_free_segmap(struct f2fs_sb_info *sbi)
{
	unsigned int start;
	int type;

	for (start = 0; start < MAIN_SEGS(sbi); start++) {
		struct seg_entry *sentry = get_seg_entry(sbi, start);
		if (!sentry->valid_blocks)
			__set_free(sbi, start);
	}

	/* mark the current segments as in-use */
	for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
		struct curseg_info *curseg_t = CURSEG_I(sbi, type);
		__set_test_and_inuse(sbi, curseg_t->segno);
	}
}

static void init_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	struct free_segmap_info *free_i = FREE_I(sbi);
	unsigned int segno = 0, offset = 0;
	unsigned short valid_blocks;

	while (1) {
		/* find dirty segment based on free segmap */
		segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
		if (segno >= MAIN_SEGS(sbi))
			break;
		offset = segno + 1;
		valid_blocks = get_valid_blocks(sbi, segno, 0);
		if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
			continue;
		if (valid_blocks > sbi->blocks_per_seg) {
			f2fs_bug_on(sbi, 1);
			continue;
		}
		mutex_lock(&dirty_i->seglist_lock);
		__locate_dirty_segment(sbi, segno, DIRTY);
		mutex_unlock(&dirty_i->seglist_lock);
	}
}
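
/*
 * Only partially valid segments (0 < valid_blocks < blocks_per_seg) are
 * marked dirty here: fully valid segments need no cleaning and fully
 * invalid ones are already free.  A count above blocks_per_seg can only
 * come from corrupted SIT data, hence the f2fs_bug_on().
 */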

static int init_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));

	dirty_i->victim_secmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dirty_i->victim_secmap)
		return -ENOMEM;
	return 0;
}

static int build_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i;
	unsigned int bitmap_size, i;

	/* allocate memory for dirty segments list information */
	dirty_i = kzalloc(sizeof(struct dirty_seglist_info), GFP_KERNEL);
	if (!dirty_i)
		return -ENOMEM;

	SM_I(sbi)->dirty_info = dirty_i;
	mutex_init(&dirty_i->seglist_lock);

	bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));

	for (i = 0; i < NR_DIRTY_TYPE; i++) {
		dirty_i->dirty_segmap[i] = kzalloc(bitmap_size, GFP_KERNEL);
		if (!dirty_i->dirty_segmap[i])
			return -ENOMEM;
	}

	init_dirty_segmap(sbi);
	return init_victim_secmap(sbi);
}

/*
 * Update min, max modified time for cost-benefit GC algorithm
 */
static void init_min_max_mtime(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int segno;

	mutex_lock(&sit_i->sentry_lock);

	sit_i->min_mtime = LLONG_MAX;

	for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
		unsigned int i;
		unsigned long long mtime = 0;

		for (i = 0; i < sbi->segs_per_sec; i++)
			mtime += get_seg_entry(sbi, segno + i)->mtime;

		mtime = div_u64(mtime, sbi->segs_per_sec);

		if (sit_i->min_mtime > mtime)
			sit_i->min_mtime = mtime;
	}
	sit_i->max_mtime = get_mtime(sbi);
	mutex_unlock(&sit_i->sentry_lock);
}
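
/*
 * Each section's mtime is taken as the mean of its member segments,
 * mtime = sum(seg mtime) / segs_per_sec, and min_mtime/max_mtime bound
 * those values across the whole main area.  The cost-benefit victim
 * selection in gc.c uses the bounds to normalize a candidate's age into
 * the [0, 100] range.
 */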

int build_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_sm_info *sm_info;
	int err;

	sm_info = kzalloc(sizeof(struct f2fs_sm_info), GFP_KERNEL);
	if (!sm_info)
		return -ENOMEM;

	/* init sm info */
	sbi->sm_info = sm_info;
	sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
	sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
	sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
	sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
	sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
	sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
	sm_info->rec_prefree_segments = sm_info->main_segments *
					DEF_RECLAIM_PREFREE_SEGMENTS / 100;
	sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
	sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
	sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;

	INIT_LIST_HEAD(&sm_info->discard_list);
	sm_info->nr_discards = 0;
	sm_info->max_discards = 0;

	sm_info->trim_sections = DEF_BATCHED_TRIM_SECTIONS;

	INIT_LIST_HEAD(&sm_info->sit_entry_set);

	if (test_opt(sbi, FLUSH_MERGE) && !f2fs_readonly(sbi->sb)) {
		err = create_flush_cmd_control(sbi);
		if (err)
			return err;
	}

	err = build_sit_info(sbi);
	if (err)
		return err;
	err = build_free_segmap(sbi);
	if (err)
		return err;
	err = build_curseg(sbi);
	if (err)
		return err;

	/* reinit free segmap based on SIT */
	build_sit_entries(sbi);

	init_free_segmap(sbi);
	err = build_dirty_segmap(sbi);
	if (err)
		return err;

	init_min_max_mtime(sbi);
	return 0;
}
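
/*
 * Expected mount-path call order (error handling elided): allocate
 * sm_info, optionally create_flush_cmd_control(), then build_sit_info()
 * -> build_free_segmap() -> build_curseg() -> build_sit_entries()
 * -> init_free_segmap() -> build_dirty_segmap() -> init_min_max_mtime().
 * Any -ENOMEM return here leaves partial state behind and relies on the
 * caller invoking destroy_segment_manager() to unwind it.
 */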

static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
					enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	mutex_lock(&dirty_i->seglist_lock);
	kfree(dirty_i->dirty_segmap[dirty_type]);
	dirty_i->nr_dirty[dirty_type] = 0;
	mutex_unlock(&dirty_i->seglist_lock);
}

static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	kfree(dirty_i->victim_secmap);
}

static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	if (!dirty_i)
		return;

	/* discard pre-free/dirty segments list */
	for (i = 0; i < NR_DIRTY_TYPE; i++)
		discard_dirty_segmap(sbi, i);

	destroy_victim_secmap(sbi);
	SM_I(sbi)->dirty_info = NULL;
	kfree(dirty_i);
}

static void destroy_curseg(struct f2fs_sb_info *sbi)
{
	struct curseg_info *array = SM_I(sbi)->curseg_array;
	int i;

	if (!array)
		return;
	SM_I(sbi)->curseg_array = NULL;
	for (i = 0; i < NR_CURSEG_TYPE; i++)
		kfree(array[i].sum_blk);
	kfree(array);
}

static void destroy_free_segmap(struct f2fs_sb_info *sbi)
{
	struct free_segmap_info *free_i = SM_I(sbi)->free_info;
	if (!free_i)
		return;
	SM_I(sbi)->free_info = NULL;
	kfree(free_i->free_segmap);
	kfree(free_i->free_secmap);
	kfree(free_i);
}

static void destroy_sit_info(struct f2fs_sb_info *sbi)
{
	struct sit_info *sit_i = SIT_I(sbi);
	unsigned int start;

	if (!sit_i)
		return;

	if (sit_i->sentries) {
		for (start = 0; start < MAIN_SEGS(sbi); start++) {
			kfree(sit_i->sentries[start].cur_valid_map);
			kfree(sit_i->sentries[start].ckpt_valid_map);
			kfree(sit_i->sentries[start].discard_map);
		}
	}
	kfree(sit_i->tmp_map);

	vfree(sit_i->sentries);
	vfree(sit_i->sec_entries);
	kfree(sit_i->dirty_sentries_bitmap);

	SM_I(sbi)->sit_info = NULL;
	kfree(sit_i->sit_bitmap);
	kfree(sit_i);
}

void destroy_segment_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_sm_info *sm_info = SM_I(sbi);

	if (!sm_info)
		return;
	destroy_flush_cmd_control(sbi);
	destroy_dirty_segmap(sbi);
	destroy_curseg(sbi);
	destroy_free_segmap(sbi);
	destroy_sit_info(sbi);
	sbi->sm_info = NULL;
	kfree(sm_info);
}

int __init create_segment_manager_caches(void)
{
	discard_entry_slab = f2fs_kmem_cache_create("discard_entry",
			sizeof(struct discard_entry));
	if (!discard_entry_slab)
		goto fail;

	sit_entry_set_slab = f2fs_kmem_cache_create("sit_entry_set",
			sizeof(struct sit_entry_set));
	if (!sit_entry_set_slab)
		goto destroy_discard_entry;

	inmem_entry_slab = f2fs_kmem_cache_create("inmem_page_entry",
			sizeof(struct inmem_pages));
	if (!inmem_entry_slab)
		goto destroy_sit_entry_set;
	return 0;

destroy_sit_entry_set:
	kmem_cache_destroy(sit_entry_set_slab);
destroy_discard_entry:
	kmem_cache_destroy(discard_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_segment_manager_caches(void)
{
	kmem_cache_destroy(sit_entry_set_slab);
	kmem_cache_destroy(discard_entry_slab);
	kmem_cache_destroy(inmem_entry_slab);
}