// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/segment.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>

#include "f2fs.h"
#include "segment.h"
#include "node.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define __reverse_ffz(x) __reverse_ffs(~(x))

static struct kmem_cache *discard_entry_slab;
static struct kmem_cache *discard_cmd_slab;
static struct kmem_cache *sit_entry_set_slab;
static struct kmem_cache *inmem_entry_slab;
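
/*
 * Load an on-disk bitmap word with its bytes reversed, so that byte 0 of
 * the bitmap lands in the most significant byte of the result (e.g. on a
 * 64-bit machine, str[0] ends up in bits 63..56).
 */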
static unsigned long __reverse_ulong(unsigned char *str)
{
	unsigned long tmp = 0;
	int shift = 24, idx = 0;

#if BITS_PER_LONG == 64
	shift = 56;
#endif
	while (shift >= 0) {
		tmp |= (unsigned long)str[idx++] << shift;
		shift -= BITS_PER_BYTE;
	}
	return tmp;
}

/*
 * __reverse_ffs is copied from include/asm-generic/bitops/__ffs.h since
 * MSB and LSB are reversed in a byte by f2fs_set_bit.
 */
static inline unsigned long __reverse_ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff00000000UL) == 0)
		num += 32;
	else
		word >>= 32;
#endif
	if ((word & 0xffff0000) == 0)
		num += 16;
	else
		word >>= 16;

	if ((word & 0xff00) == 0)
		num += 8;
	else
		word >>= 8;

	if ((word & 0xf0) == 0)
		num += 4;
	else
		word >>= 4;

	if ((word & 0xc) == 0)
		num += 2;
	else
		word >>= 2;

	if ((word & 0x2) == 0)
		num += 1;
	return num;
}
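
/*
 * For illustration: on a 64-bit machine, __reverse_ffs(0x20) returns 58.
 * The upper 32-, 16- and 8-bit halves are all zero (num = 32 + 16 + 8 = 56),
 * the 0xf0 nibble is non-zero so the word is shifted down to 0x2, the 0xc
 * test adds 2 (num = 58), and the final 0x2 bit is set, so 58 is returned.
 */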

/*
 * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because
 * f2fs_set_bit makes MSB and LSB reversed in a byte.
 * @size must be an integral multiple of BITS_PER_LONG.
 * Example:
 *                             MSB <--> LSB
 *   f2fs_set_bit(0, bitmap) => 1000 0000
 *   f2fs_set_bit(7, bitmap) => 0000 0001
 */
static unsigned long __find_rev_next_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == 0)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		tmp &= ~0UL >> offset;
		if (size < BITS_PER_LONG)
			tmp &= (~0UL << (BITS_PER_LONG - size));
		if (tmp)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffs(tmp);
}
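
/*
 * For illustration: with a zeroed 64-bit bitmap where only
 * f2fs_set_bit(3, map) was called (byte 0 == 0x10),
 * __find_rev_next_bit(map, 64, 0) returns 3: __reverse_ulong() yields
 * 0x1000000000000000 and __reverse_ffs() locates the set bit at
 * position 3 counted from the MSB.
 */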

static unsigned long __find_rev_next_zero_bit(const unsigned long *addr,
			unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + BIT_WORD(offset);
	unsigned long result = size;
	unsigned long tmp;

	if (offset >= size)
		return size;

	size -= (offset & ~(BITS_PER_LONG - 1));
	offset %= BITS_PER_LONG;

	while (1) {
		if (*p == ~0UL)
			goto pass;

		tmp = __reverse_ulong((unsigned char *)p);

		if (offset)
			tmp |= ~0UL << (BITS_PER_LONG - offset);
		if (size < BITS_PER_LONG)
			tmp |= ~0UL >> size;
		if (tmp != ~0UL)
			goto found;
pass:
		if (size <= BITS_PER_LONG)
			break;
		size -= BITS_PER_LONG;
		offset = 0;
		p++;
	}
	return result;
found:
	return result - size + __reverse_ffz(tmp);
}

bool f2fs_need_SSR(struct f2fs_sb_info *sbi)
{
	int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
	int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
	int imeta_secs = get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);

	if (f2fs_lfs_mode(sbi))
		return false;
	if (sbi->gc_mode == GC_URGENT)
		return true;
	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return true;

	return free_sections(sbi) <= (node_secs + 2 * dent_secs + imeta_secs +
			SM_I(sbi)->min_ssr_sections + reserved_sections(sbi));
}
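
/*
 * Purely illustrative numbers: with 3 dirty node sections, 2 dirty dentry
 * sections, 1 dirty imeta section, min_ssr_sections == 2 and 6 reserved
 * sections, the threshold above is 3 + 2 * 2 + 1 + 2 + 6 = 16, so SSR is
 * needed once 16 or fewer sections remain free.
 */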

void f2fs_register_inmem_page(struct inode *inode, struct page *page)
{
	struct inmem_pages *new;

	f2fs_trace_pid(page);

	f2fs_set_page_private(page, (unsigned long)ATOMIC_WRITTEN_PAGE);

	new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);

	/* add atomic page indices to the list */
	new->page = page;
	INIT_LIST_HEAD(&new->list);

	/* increase reference count with clean state */
	get_page(page);
	mutex_lock(&F2FS_I(inode)->inmem_lock);
	list_add_tail(&new->list, &F2FS_I(inode)->inmem_pages);
	inc_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	mutex_unlock(&F2FS_I(inode)->inmem_lock);

	trace_f2fs_register_inmem_page(page, INMEM);
}

static int __revoke_inmem_pages(struct inode *inode,
				struct list_head *head, bool drop, bool recover,
				bool trylock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct inmem_pages *cur, *tmp;
	int err = 0;

	list_for_each_entry_safe(cur, tmp, head, list) {
		struct page *page = cur->page;

		if (drop)
			trace_f2fs_commit_inmem_page(page, INMEM_DROP);

		if (trylock) {
			/*
			 * to avoid deadlock in between page lock and
			 * inmem_lock.
			 */
			if (!trylock_page(page))
				continue;
		} else {
			lock_page(page);
		}

		f2fs_wait_on_page_writeback(page, DATA, true, true);

		if (recover) {
			struct dnode_of_data dn;
			struct node_info ni;

			trace_f2fs_commit_inmem_page(page, INMEM_REVOKE);
retry:
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			err = f2fs_get_dnode_of_data(&dn, page->index,
								LOOKUP_NODE);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					cond_resched();
					goto retry;
				}
				err = -EAGAIN;
				goto next;
			}

			err = f2fs_get_node_info(sbi, dn.nid, &ni);
			if (err) {
				f2fs_put_dnode(&dn);
				return err;
			}

			if (cur->old_addr == NEW_ADDR) {
				f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
			} else
				f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
					cur->old_addr, ni.version, true, true);
			f2fs_put_dnode(&dn);
		}
next:
		/* we don't need to invalidate this in the successful status */
		if (drop || recover) {
			ClearPageUptodate(page);
			clear_cold_data(page);
		}
		f2fs_clear_page_private(page);
		f2fs_put_page(page, 1);

		list_del(&cur->list);
		kmem_cache_free(inmem_entry_slab, cur);
		dec_page_count(F2FS_I_SB(inode), F2FS_INMEM_PAGES);
	}
	return err;
}

void f2fs_drop_inmem_pages_all(struct f2fs_sb_info *sbi, bool gc_failure)
{
	struct list_head *head = &sbi->inode_list[ATOMIC_FILE];
	struct inode *inode;
	struct f2fs_inode_info *fi;
	unsigned int count = sbi->atomic_files;
	unsigned int looped = 0;
next:
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(head)) {
		spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
		return;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, inmem_ilist);
	inode = igrab(&fi->vfs_inode);
	if (inode)
		list_move_tail(&fi->inmem_ilist, head);
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	if (inode) {
		if (gc_failure) {
			if (!fi->i_gc_failures[GC_FAILURE_ATOMIC])
				goto skip;
		}
		set_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		f2fs_drop_inmem_pages(inode);
skip:
		iput(inode);
	}
	congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
	cond_resched();
	if (gc_failure) {
		if (++looped >= count)
			return;
	}
	goto next;
}

void f2fs_drop_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);

	while (!list_empty(&fi->inmem_pages)) {
		mutex_lock(&fi->inmem_lock);
		__revoke_inmem_pages(inode, &fi->inmem_pages,
						true, false, true);
		mutex_unlock(&fi->inmem_lock);
	}

	fi->i_gc_failures[GC_FAILURE_ATOMIC] = 0;

	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (!list_empty(&fi->inmem_ilist))
		list_del_init(&fi->inmem_ilist);
	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		sbi->atomic_files--;
	}
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);
}

void f2fs_drop_inmem_page(struct inode *inode, struct page *page)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct list_head *head = &fi->inmem_pages;
	struct inmem_pages *cur = NULL;

	f2fs_bug_on(sbi, !IS_ATOMIC_WRITTEN_PAGE(page));

	mutex_lock(&fi->inmem_lock);
	list_for_each_entry(cur, head, list) {
		if (cur->page == page)
			break;
	}

	f2fs_bug_on(sbi, list_empty(head) || cur->page != page);
	list_del(&cur->list);
	mutex_unlock(&fi->inmem_lock);

	dec_page_count(sbi, F2FS_INMEM_PAGES);
	kmem_cache_free(inmem_entry_slab, cur);

	ClearPageUptodate(page);
	f2fs_clear_page_private(page);
	f2fs_put_page(page, 0);

	trace_f2fs_commit_inmem_page(page, INMEM_INVALIDATE);
}

static int __f2fs_commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct inmem_pages *cur, *tmp;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = inode->i_ino,
		.type = DATA,
		.op = REQ_OP_WRITE,
		.op_flags = REQ_SYNC | REQ_PRIO,
		.io_type = FS_DATA_IO,
	};
	struct list_head revoke_list;
	bool submit_bio = false;
	int err = 0;

	INIT_LIST_HEAD(&revoke_list);

	list_for_each_entry_safe(cur, tmp, &fi->inmem_pages, list) {
		struct page *page = cur->page;

		lock_page(page);
		if (page->mapping == inode->i_mapping) {
			trace_f2fs_commit_inmem_page(page, INMEM);

			f2fs_wait_on_page_writeback(page, DATA, true, true);

			set_page_dirty(page);
			if (clear_page_dirty_for_io(page)) {
				inode_dec_dirty_pages(inode);
				f2fs_remove_dirty_inode(inode);
			}
retry:
			fio.page = page;
			fio.old_blkaddr = NULL_ADDR;
			fio.encrypted_page = NULL;
			fio.need_lock = LOCK_DONE;
			err = f2fs_do_write_data_page(&fio);
			if (err) {
				if (err == -ENOMEM) {
					congestion_wait(BLK_RW_ASYNC,
							DEFAULT_IO_TIMEOUT);
					cond_resched();
					goto retry;
				}
				unlock_page(page);
				break;
			}
			/* record old blkaddr for revoking */
			cur->old_addr = fio.old_blkaddr;
			submit_bio = true;
		}
		unlock_page(page);
		list_move_tail(&cur->list, &revoke_list);
	}

	if (submit_bio)
		f2fs_submit_merged_write_cond(sbi, inode, NULL, 0, DATA);

	if (err) {
		/*
		 * try to revoke all committed pages, but we could still fail
		 * due to lack of memory or some other reason. If that happens,
		 * -EAGAIN is returned: the transaction has already lost its
		 * integrity, so the caller should use the journal to recover
		 * or rewrite & commit the last transaction. For any other
		 * error number, revoking was done by the filesystem itself.
		 */
		err = __revoke_inmem_pages(inode, &revoke_list,
						false, true, false);

		/* drop all uncommitted pages */
		__revoke_inmem_pages(inode, &fi->inmem_pages,
						true, false, false);
	} else {
		__revoke_inmem_pages(inode, &revoke_list,
						false, false, false);
	}

	return err;
}

int f2fs_commit_inmem_pages(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	f2fs_balance_fs(sbi, true);

	down_write(&fi->i_gc_rwsem[WRITE]);

	f2fs_lock_op(sbi);
	set_inode_flag(inode, FI_ATOMIC_COMMIT);

	mutex_lock(&fi->inmem_lock);
	err = __f2fs_commit_inmem_pages(inode);
	mutex_unlock(&fi->inmem_lock);

	clear_inode_flag(inode, FI_ATOMIC_COMMIT);

	f2fs_unlock_op(sbi);
	up_write(&fi->i_gc_rwsem[WRITE]);

	return err;
}
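
/*
 * Note the lock ordering used by the commit path above:
 * i_gc_rwsem[WRITE] -> f2fs_lock_op() -> inmem_lock. Writeback of the
 * in-memory pages happens entirely under this nesting, so neither GC nor
 * checkpointing runs concurrently with the atomic commit.
 */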

/*
 * This function balances dirty node and dentry pages.
 * In addition, it controls garbage collection.
 */
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
	if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
		f2fs_show_injection_info(sbi, FAULT_CHECKPOINT);
		f2fs_stop_checkpoint(sbi, false);
	}

	/* balance_fs_bg is able to be pending */
	if (need && excess_cached_nats(sbi))
		f2fs_balance_fs_bg(sbi, false);

	if (!f2fs_is_checkpoint_ready(sbi))
		return;

	/*
	 * We should do GC or end up with checkpoint, if there are so many dirty
	 * dir/node pages without enough free segments.
	 */
	if (has_not_enough_free_secs(sbi, 0, 0)) {
		down_write(&sbi->gc_lock);
		f2fs_gc(sbi, false, false, NULL_SEGNO);
	}
}

void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		return;

	/* try to shrink extent cache when there is not enough memory */
	if (!f2fs_available_free_memory(sbi, EXTENT_CACHE))
		f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER);

	/* check the # of cached NAT entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES))
		f2fs_try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS))
		f2fs_try_to_free_nids(sbi, MAX_FREE_NIDS);
	else
		f2fs_build_free_nids(sbi, false, false);

	if (!is_idle(sbi, REQ_TIME) &&
		(!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
		return;

	/* checkpoint is the only way to shrink partial cached entries */
	if (!f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
			!f2fs_available_free_memory(sbi, INO_ENTRIES) ||
			excess_prefree_segs(sbi) ||
			excess_dirty_nats(sbi) ||
			excess_dirty_nodes(sbi) ||
			f2fs_time_over(sbi, CP_TIME)) {
		if (test_opt(sbi, DATA_FLUSH) && from_bg) {
			struct blk_plug plug;

			mutex_lock(&sbi->flush_lock);

			blk_start_plug(&plug);
			f2fs_sync_dirty_inodes(sbi, FILE_INODE);
			blk_finish_plug(&plug);

			mutex_unlock(&sbi->flush_lock);
		}
		f2fs_sync_fs(sbi->sb, true);
		stat_inc_bg_cp_count(sbi->stat_info);
	}
}

static int __submit_flush_wait(struct f2fs_sb_info *sbi,
				struct block_device *bdev)
{
	struct bio *bio;
	int ret;

	bio = f2fs_bio_alloc(sbi, 0, false);
	if (!bio)
		return -ENOMEM;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
	bio_set_dev(bio, bdev);
	ret = submit_bio_wait(bio);
	bio_put(bio);

	trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER),
				test_opt(sbi, FLUSH_MERGE), ret);
	return ret;
}

static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
{
	int ret = 0;
	int i;

	if (!f2fs_is_multi_device(sbi))
		return __submit_flush_wait(sbi, sbi->sb->s_bdev);

	for (i = 0; i < sbi->s_ndevs; i++) {
		if (!f2fs_is_dirty_device(sbi, ino, i, FLUSH_INO))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;
	}
	return ret;
}

static int issue_flush_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	wait_queue_head_t *q = &fcc->flush_wait_queue;
repeat:
	if (kthread_should_stop())
		return 0;

	sb_start_intwrite(sbi->sb);

	if (!llist_empty(&fcc->issue_list)) {
		struct flush_cmd *cmd, *next;
		int ret;

		fcc->dispatch_list = llist_del_all(&fcc->issue_list);
		fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);

		cmd = llist_entry(fcc->dispatch_list, struct flush_cmd, llnode);

		ret = submit_flush_wait(sbi, cmd->ino);
		atomic_inc(&fcc->issued_flush);

		llist_for_each_entry_safe(cmd, next,
					  fcc->dispatch_list, llnode) {
			cmd->ret = ret;
			complete(&cmd->wait);
		}
		fcc->dispatch_list = NULL;
	}

	sb_end_intwrite(sbi->sb);

	wait_event_interruptible(*q,
		kthread_should_stop() || !llist_empty(&fcc->issue_list));
	goto repeat;
}
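
/*
 * With FLUSH_MERGE, concurrent callers of f2fs_issue_flush() park their
 * flush_cmd on fcc->issue_list; the thread above drains the whole list,
 * issues a single preflush, and then completes every waiter with the same
 * return code, so N queued waiters cost one device flush.
 */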

int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;
	struct flush_cmd cmd;
	int ret;

	if (test_opt(sbi, NOBARRIER))
		return 0;

	if (!test_opt(sbi, FLUSH_MERGE)) {
		atomic_inc(&fcc->queued_flush);
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);
		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	if (atomic_inc_return(&fcc->queued_flush) == 1 ||
			f2fs_is_multi_device(sbi)) {
		ret = submit_flush_wait(sbi, ino);
		atomic_dec(&fcc->queued_flush);

		atomic_inc(&fcc->issued_flush);
		return ret;
	}

	cmd.ino = ino;
	init_completion(&cmd.wait);

	llist_add(&cmd.llnode, &fcc->issue_list);

	/* update issue_list before we wake up issue_flush thread */
	smp_mb();

	if (waitqueue_active(&fcc->flush_wait_queue))
		wake_up(&fcc->flush_wait_queue);

	if (fcc->f2fs_issue_flush) {
		wait_for_completion(&cmd.wait);
		atomic_dec(&fcc->queued_flush);
	} else {
		struct llist_node *list;

		list = llist_del_all(&fcc->issue_list);
		if (!list) {
			wait_for_completion(&cmd.wait);
			atomic_dec(&fcc->queued_flush);
		} else {
			struct flush_cmd *tmp, *next;

			ret = submit_flush_wait(sbi, ino);

			llist_for_each_entry_safe(tmp, next, list, llnode) {
				if (tmp == &cmd) {
					cmd.ret = ret;
					atomic_dec(&fcc->queued_flush);
					continue;
				}
				tmp->ret = ret;
				complete(&tmp->wait);
			}
		}
	}

	return cmd.ret;
}

int f2fs_create_flush_cmd_control(struct f2fs_sb_info *sbi)
{
	dev_t dev = sbi->sb->s_bdev->bd_dev;
	struct flush_cmd_control *fcc;
	int err = 0;

	if (SM_I(sbi)->fcc_info) {
		fcc = SM_I(sbi)->fcc_info;
		if (fcc->f2fs_issue_flush)
			return err;
		goto init_thread;
	}

	fcc = f2fs_kzalloc(sbi, sizeof(struct flush_cmd_control), GFP_KERNEL);
	if (!fcc)
		return -ENOMEM;
	atomic_set(&fcc->issued_flush, 0);
	atomic_set(&fcc->queued_flush, 0);
	init_waitqueue_head(&fcc->flush_wait_queue);
	init_llist_head(&fcc->issue_list);
	SM_I(sbi)->fcc_info = fcc;
	if (!test_opt(sbi, FLUSH_MERGE))
		return err;

init_thread:
	fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
				"f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
	if (IS_ERR(fcc->f2fs_issue_flush)) {
		err = PTR_ERR(fcc->f2fs_issue_flush);
		kvfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
		return err;
	}

	return err;
}

void f2fs_destroy_flush_cmd_control(struct f2fs_sb_info *sbi, bool free)
{
	struct flush_cmd_control *fcc = SM_I(sbi)->fcc_info;

	if (fcc && fcc->f2fs_issue_flush) {
		struct task_struct *flush_thread = fcc->f2fs_issue_flush;

		fcc->f2fs_issue_flush = NULL;
		kthread_stop(flush_thread);
	}
	if (free) {
		kvfree(fcc);
		SM_I(sbi)->fcc_info = NULL;
	}
}

int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
{
	int ret = 0, i;

	if (!f2fs_is_multi_device(sbi))
		return 0;

	for (i = 1; i < sbi->s_ndevs; i++) {
		if (!f2fs_test_bit(i, (char *)&sbi->dirty_device))
			continue;
		ret = __submit_flush_wait(sbi, FDEV(i).bdev);
		if (ret)
			break;

		spin_lock(&sbi->dev_lock);
		f2fs_clear_bit(i, (char *)&sbi->dirty_device);
		spin_unlock(&sbi->dev_lock);
	}

	return ret;
}

static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
						enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	/* need not be added */
	if (IS_CURSEG(sbi, segno))
		return;

	if (!test_and_set_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]++;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (unlikely(t >= DIRTY)) {
			f2fs_bug_on(sbi, 1);
			return;
		}
		if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]++;
	}
}

static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno,
						enum dirty_type dirty_type)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);

	if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type]))
		dirty_i->nr_dirty[dirty_type]--;

	if (dirty_type == DIRTY) {
		struct seg_entry *sentry = get_seg_entry(sbi, segno);
		enum dirty_type t = sentry->type;

		if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t]))
			dirty_i->nr_dirty[t]--;

		if (get_valid_blocks(sbi, segno, true) == 0) {
			clear_bit(GET_SEC_FROM_SEG(sbi, segno),
						dirty_i->victim_secmap);
#ifdef CONFIG_F2FS_CHECK_FS
			clear_bit(segno, SIT_I(sbi)->invalid_segmap);
#endif
		}
	}
}

/*
 * No error such as -ENOMEM should occur here.
 * Adding a dirty entry into the seglist is not a critical operation.
 * If a given segment is one of the current working segments, it won't be added.
 */
static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned short valid_blocks, ckpt_valid_blocks;

	if (segno == NULL_SEGNO || IS_CURSEG(sbi, segno))
		return;

	mutex_lock(&dirty_i->seglist_lock);

	valid_blocks = get_valid_blocks(sbi, segno, false);
	ckpt_valid_blocks = get_ckpt_valid_blocks(sbi, segno);

	if (valid_blocks == 0 && (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) ||
		ckpt_valid_blocks == sbi->blocks_per_seg)) {
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	} else if (valid_blocks < sbi->blocks_per_seg) {
		__locate_dirty_segment(sbi, segno, DIRTY);
	} else {
		/* Recovery routine with SSR needs this */
		__remove_dirty_segment(sbi, segno, DIRTY);
	}

	mutex_unlock(&dirty_i->seglist_lock);
}

/* This moves currently empty dirty blocks to prefree. Must hold seglist_lock */
void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (IS_CURSEG(sbi, segno))
			continue;
		__locate_dirty_segment(sbi, segno, PRE);
		__remove_dirty_segment(sbi, segno, DIRTY);
	}
	mutex_unlock(&dirty_i->seglist_lock);
}

block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));
	block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	block_t holes[2] = {0, 0};	/* DATA and NODE */
	block_t unusable;
	struct seg_entry *se;
	unsigned int segno;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		se = get_seg_entry(sbi, segno);
		if (IS_NODESEG(se->type))
			holes[NODE] += sbi->blocks_per_seg - se->valid_blocks;
		else
			holes[DATA] += sbi->blocks_per_seg - se->valid_blocks;
	}
	mutex_unlock(&dirty_i->seglist_lock);

	unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
	if (unusable > ovp_holes)
		return unusable - ovp_holes;
	return 0;
}

int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
{
	int ovp_hole_segs =
		(overprovision_segments(sbi) - reserved_segments(sbi));

	if (unusable > F2FS_OPTION(sbi).unusable_cap)
		return -EAGAIN;
	if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
		dirty_segments(sbi) > ovp_hole_segs)
		return -EAGAIN;
	return 0;
}

/* This is only used by SBI_CP_DISABLED */
static unsigned int get_free_segment(struct f2fs_sb_info *sbi)
{
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	unsigned int segno = 0;

	mutex_lock(&dirty_i->seglist_lock);
	for_each_set_bit(segno, dirty_i->dirty_segmap[DIRTY], MAIN_SEGS(sbi)) {
		if (get_valid_blocks(sbi, segno, false))
			continue;
		if (get_ckpt_valid_blocks(sbi, segno))
			continue;
		mutex_unlock(&dirty_i->seglist_lock);
		return segno;
	}
	mutex_unlock(&dirty_i->seglist_lock);
	return NULL_SEGNO;
}

static struct discard_cmd *__create_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t lstart,
		block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc;

	f2fs_bug_on(sbi, !len);

	pend_list = &dcc->pend_list[plist_idx(len)];

	dc = f2fs_kmem_cache_alloc(discard_cmd_slab, GFP_NOFS);
	INIT_LIST_HEAD(&dc->list);
	dc->bdev = bdev;
	dc->lstart = lstart;
	dc->start = start;
	dc->len = len;
	dc->ref = 0;
	dc->state = D_PREP;
	dc->queued = 0;
	dc->error = 0;
	init_completion(&dc->wait);
	list_add_tail(&dc->list, pend_list);
	spin_lock_init(&dc->lock);
	dc->bio_ref = 0;
	atomic_inc(&dcc->discard_cmd_cnt);
	dcc->undiscard_blks += len;

	return dc;
}

static struct discard_cmd *__attach_discard_cmd(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node *parent, struct rb_node **p,
				bool leftmost)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;

	dc = __create_discard_cmd(sbi, bdev, lstart, start, len);

	rb_link_node(&dc->rb_node, parent, p);
	rb_insert_color_cached(&dc->rb_node, &dcc->root, leftmost);

	return dc;
}

static void __detach_discard_cmd(struct discard_cmd_control *dcc,
							struct discard_cmd *dc)
{
	if (dc->state == D_DONE)
		atomic_sub(dc->queued, &dcc->queued_discard);

	list_del(&dc->list);
	rb_erase_cached(&dc->rb_node, &dcc->root);
	dcc->undiscard_blks -= dc->len;

	kmem_cache_free(discard_cmd_slab, dc);

	atomic_dec(&dcc->discard_cmd_cnt);
}

static void __remove_discard_cmd(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned long flags;

	trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);

	spin_lock_irqsave(&dc->lock, flags);
	if (dc->bio_ref) {
		spin_unlock_irqrestore(&dc->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dc->lock, flags);

	f2fs_bug_on(sbi, dc->ref);

	if (dc->error == -EOPNOTSUPP)
		dc->error = 0;

	if (dc->error)
		printk_ratelimited(
			"%sF2FS-fs (%s): Issue discard(%u, %u, %u) failed, ret: %d",
			KERN_INFO, sbi->sb->s_id,
			dc->lstart, dc->start, dc->len, dc->error);
	__detach_discard_cmd(dcc, dc);
}

static void f2fs_submit_discard_endio(struct bio *bio)
{
	struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
	unsigned long flags;

	dc->error = blk_status_to_errno(bio->bi_status);

	spin_lock_irqsave(&dc->lock, flags);
	dc->bio_ref--;
	if (!dc->bio_ref && dc->state == D_SUBMIT) {
		dc->state = D_DONE;
		complete_all(&dc->wait);
	}
	spin_unlock_irqrestore(&dc->lock, flags);
	bio_put(bio);
}

static void __check_sit_bitmap(struct f2fs_sb_info *sbi,
				block_t start, block_t end)
{
#ifdef CONFIG_F2FS_CHECK_FS
	struct seg_entry *sentry;
	unsigned int segno;
	block_t blk = start;
	unsigned long offset, size, max_blocks = sbi->blocks_per_seg;
	unsigned long *map;

	while (blk < end) {
		segno = GET_SEGNO(sbi, blk);
		sentry = get_seg_entry(sbi, segno);
		offset = GET_BLKOFF_FROM_SEG0(sbi, blk);

		if (end < START_BLOCK(sbi, segno + 1))
			size = GET_BLKOFF_FROM_SEG0(sbi, end);
		else
			size = max_blocks;
		map = (unsigned long *)(sentry->cur_valid_map);
		offset = __find_rev_next_bit(map, size, offset);
		f2fs_bug_on(sbi, offset != size);
		blk = START_BLOCK(sbi, segno + 1);
	}
#endif
}

static void __init_discard_policy(struct f2fs_sb_info *sbi,
				struct discard_policy *dpolicy,
				int discard_type, unsigned int granularity)
{
	/* common policy */
	dpolicy->type = discard_type;
	dpolicy->sync = true;
	dpolicy->ordered = false;
	dpolicy->granularity = granularity;

	dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
	dpolicy->io_aware_gran = MAX_PLIST_NUM;
	dpolicy->timeout = false;

	if (discard_type == DPOLICY_BG) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = true;
		dpolicy->sync = false;
		dpolicy->ordered = true;
		if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
			dpolicy->granularity = 1;
			dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		}
	} else if (discard_type == DPOLICY_FORCE) {
		dpolicy->min_interval = DEF_MIN_DISCARD_ISSUE_TIME;
		dpolicy->mid_interval = DEF_MID_DISCARD_ISSUE_TIME;
		dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_FSTRIM) {
		dpolicy->io_aware = false;
	} else if (discard_type == DPOLICY_UMOUNT) {
		dpolicy->io_aware = false;
		/* we need to issue all to keep CP_TRIMMED_FLAG */
		dpolicy->granularity = 1;
		dpolicy->timeout = true;
	}
}
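
/*
 * Rough summary of the policies set up above: DPOLICY_BG issues
 * asynchronously and backs off when the device is busy, DPOLICY_FORCE
 * issues synchronously regardless of idle state, DPOLICY_FSTRIM serves
 * fstrim requests, and DPOLICY_UMOUNT drops the granularity to 1 and arms
 * a timeout so every pending discard can be issued before CP_TRIMMED_FLAG
 * is written.
 */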

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len);
/* this function is copied from blkdev_issue_discard() in block/blk-lib.c */
static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						struct discard_cmd *dc,
						unsigned int *issued)
{
	struct block_device *bdev = dc->bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	int flag = dpolicy->sync ? REQ_SYNC : 0;
	block_t lstart, start, len, total_len;
	int err = 0;

	if (dc->state != D_PREP)
		return 0;

	if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
		return 0;

	trace_f2fs_issue_discard(bdev, dc->start, dc->len);

	lstart = dc->lstart;
	start = dc->start;
	len = dc->len;
	total_len = len;

	dc->len = 0;

	while (total_len && *issued < dpolicy->max_requests && !err) {
		struct bio *bio = NULL;
		unsigned long flags;
		bool last = true;

		if (len > max_discard_blocks) {
			len = max_discard_blocks;
			last = false;
		}

		(*issued)++;
		if (*issued == dpolicy->max_requests)
			last = true;

		dc->len += len;

		if (time_to_inject(sbi, FAULT_DISCARD)) {
			f2fs_show_injection_info(sbi, FAULT_DISCARD);
			err = -EIO;
			goto submit;
		}
		err = __blkdev_issue_discard(bdev,
					SECTOR_FROM_BLOCK(start),
					SECTOR_FROM_BLOCK(len),
					GFP_NOFS, 0, &bio);
submit:
		if (err) {
			spin_lock_irqsave(&dc->lock, flags);
			if (dc->state == D_PARTIAL)
				dc->state = D_SUBMIT;
			spin_unlock_irqrestore(&dc->lock, flags);

			break;
		}

		f2fs_bug_on(sbi, !bio);

		/*
		 * should keep before submission to avoid D_DONE
		 * right away
		 */
		spin_lock_irqsave(&dc->lock, flags);
		if (last)
			dc->state = D_SUBMIT;
		else
			dc->state = D_PARTIAL;
		dc->bio_ref++;
		spin_unlock_irqrestore(&dc->lock, flags);

		atomic_inc(&dcc->queued_discard);
		dc->queued++;
		list_move_tail(&dc->list, wait_list);

		/* sanity check on discard range */
		__check_sit_bitmap(sbi, lstart, lstart + len);

		bio->bi_private = dc;
		bio->bi_end_io = f2fs_submit_discard_endio;
		bio->bi_opf |= flag;
		submit_bio(bio);

		atomic_inc(&dcc->issued_discard);

		f2fs_update_iostat(sbi, FS_DISCARD, 1);

		lstart += len;
		start += len;
		total_len -= len;
		len = total_len;
	}

	if (!err && len)
		__update_discard_tree_range(sbi, bdev, lstart, start, len);
	return err;
}
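
/*
 * A single command larger than the device limit is carved into several
 * bios above: e.g. if max_discard_blocks is 512, a 1024-block command is
 * submitted as two discard bios, with dc->state moving through D_PARTIAL
 * until the last piece flips it to D_SUBMIT.
 */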

static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len,
				struct rb_node **insert_p,
				struct rb_node *insert_parent)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct discard_cmd *dc = NULL;
	bool leftmost = true;

	if (insert_p && insert_parent) {
		parent = insert_parent;
		p = insert_p;
		goto do_insert;
	}

	p = f2fs_lookup_rb_tree_for_insert(sbi, &dcc->root, &parent,
							lstart, &leftmost);
do_insert:
	dc = __attach_discard_cmd(sbi, bdev, lstart, start, len, parent,
								p, leftmost);
	if (!dc)
		return NULL;

	return dc;
}

static void __relocate_discard_cmd(struct discard_cmd_control *dcc,
						struct discard_cmd *dc)
{
	list_move_tail(&dc->list, &dcc->pend_list[plist_idx(dc->len)]);
}

static void __punch_discard_cmd(struct f2fs_sb_info *sbi,
				struct discard_cmd *dc, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_info di = dc->di;
	bool modified = false;

	if (dc->state == D_DONE || dc->len == 1) {
		__remove_discard_cmd(sbi, dc);
		return;
	}

	dcc->undiscard_blks -= di.len;

	if (blkaddr > di.lstart) {
		dc->len = blkaddr - dc->lstart;
		dcc->undiscard_blks += dc->len;
		__relocate_discard_cmd(dcc, dc);
		modified = true;
	}

	if (blkaddr < di.lstart + di.len - 1) {
		if (modified) {
			__insert_discard_tree(sbi, dc->bdev, blkaddr + 1,
					di.start + blkaddr + 1 - di.lstart,
					di.lstart + di.len - 1 - blkaddr,
					NULL, NULL);
		} else {
			dc->lstart++;
			dc->len--;
			dc->start++;
			dcc->undiscard_blks += dc->len;
			__relocate_discard_cmd(dcc, dc);
		}
	}
}
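
/*
 * Punching block b out of a pending command covering [lstart, lstart+len)
 * leaves a front part [lstart, b) (the trimmed dc) and, when b is not the
 * last block, a tail part [b+1, lstart+len) inserted as a new command.
 */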

static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
				struct block_device *bdev, block_t lstart,
				block_t start, block_t len)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct discard_cmd *dc;
	struct discard_info di = {0};
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_discard_blocks =
			SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
	block_t end = lstart + len;

	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, lstart,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (dc)
		prev_dc = dc;

	if (!prev_dc) {
		di.lstart = lstart;
		di.len = next_dc ? next_dc->lstart - lstart : len;
		di.len = min(di.len, len);
		di.start = start;
	}

	while (1) {
		struct rb_node *node;
		bool merged = false;
		struct discard_cmd *tdc = NULL;

		if (prev_dc) {
			di.lstart = prev_dc->lstart + prev_dc->len;
			if (di.lstart < lstart)
				di.lstart = lstart;
			if (di.lstart >= end)
				break;

			if (!next_dc || next_dc->lstart > end)
				di.len = end - di.lstart;
			else
				di.len = next_dc->lstart - di.lstart;
			di.start = start + di.lstart - lstart;
		}

		if (!di.len)
			goto next;

		if (prev_dc && prev_dc->state == D_PREP &&
			prev_dc->bdev == bdev &&
			__is_discard_back_mergeable(&di, &prev_dc->di,
							max_discard_blocks)) {
			prev_dc->di.len += di.len;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, prev_dc);
			di = prev_dc->di;
			tdc = prev_dc;
			merged = true;
		}

		if (next_dc && next_dc->state == D_PREP &&
			next_dc->bdev == bdev &&
			__is_discard_front_mergeable(&di, &next_dc->di,
							max_discard_blocks)) {
			next_dc->di.lstart = di.lstart;
			next_dc->di.len += di.len;
			next_dc->di.start = di.start;
			dcc->undiscard_blks += di.len;
			__relocate_discard_cmd(dcc, next_dc);
			if (tdc)
				__remove_discard_cmd(sbi, tdc);
			merged = true;
		}

		if (!merged) {
			__insert_discard_tree(sbi, bdev, di.lstart, di.start,
							di.len, NULL, NULL);
		}
next:
		prev_dc = next_dc;
		if (!prev_dc)
			break;

		node = rb_next(&prev_dc->rb_node);
		next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}
}
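
/*
 * For example, if commands [100, 110) and [120, 130) are pending (D_PREP,
 * same bdev, merged size within max_discard_blocks) and [110, 120) is
 * added, the new range is back-merged into the first command, the second
 * is then front-merged into it, and a single command [100, 130) remains
 * in the tree.
 */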

static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
		struct block_device *bdev, block_t blkstart, block_t blklen)
{
	block_t lblkstart = blkstart;

	if (!f2fs_bdev_support_discard(bdev))
		return 0;

	trace_f2fs_queue_discard(bdev, blkstart, blklen);

	if (f2fs_is_multi_device(sbi)) {
		int devi = f2fs_target_device_index(sbi, blkstart);

		blkstart -= FDEV(devi).start_blk;
	}
	mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
	__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
	mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
	return 0;
}

static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
	struct rb_node **insert_p = NULL, *insert_parent = NULL;
	struct discard_cmd *dc;
	struct blk_plug plug;
	unsigned int pos = dcc->next_pos;
	unsigned int issued = 0;
	bool io_interrupted = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
					NULL, pos,
					(struct rb_entry **)&prev_dc,
					(struct rb_entry **)&next_dc,
					&insert_p, &insert_parent, true, NULL);
	if (!dc)
		dc = next_dc;

	blk_start_plug(&plug);

	while (dc) {
		struct rb_node *node;
		int err = 0;

		if (dc->state != D_PREP)
			goto next;

		if (dpolicy->io_aware && !is_idle(sbi, DISCARD_TIME)) {
			io_interrupted = true;
			break;
		}

		dcc->next_pos = dc->lstart + dc->len;
		err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);

		if (issued >= dpolicy->max_requests)
			break;
next:
		node = rb_next(&dc->rb_node);
		if (err)
			__remove_discard_cmd(sbi, dc);
		dc = rb_entry_safe(node, struct discard_cmd, rb_node);
	}

	blk_finish_plug(&plug);

	if (!dc)
		dcc->next_pos = 0;

	mutex_unlock(&dcc->cmd_lock);

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy);

static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
					struct discard_policy *dpolicy)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	struct blk_plug plug;
	int i, issued;
	bool io_interrupted = false;

	if (dpolicy->timeout)
		f2fs_update_time(sbi, UMOUNT_DISCARD_TIMEOUT);

retry:
	issued = 0;
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
			break;

		if (i + 1 < dpolicy->granularity)
			break;

		if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
			return __issue_discard_cmd_orderly(sbi, dpolicy);

		pend_list = &dcc->pend_list[i];

		mutex_lock(&dcc->cmd_lock);
		if (list_empty(pend_list))
			goto next;
		if (unlikely(dcc->rbtree_check))
			f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
								&dcc->root));
		blk_start_plug(&plug);
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);

			if (dpolicy->timeout &&
				f2fs_time_over(sbi, UMOUNT_DISCARD_TIMEOUT))
				break;

			if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
						!is_idle(sbi, DISCARD_TIME)) {
				io_interrupted = true;
				break;
			}

			__submit_discard_cmd(sbi, dpolicy, dc, &issued);

			if (issued >= dpolicy->max_requests)
				break;
		}
		blk_finish_plug(&plug);
next:
		mutex_unlock(&dcc->cmd_lock);

		if (issued >= dpolicy->max_requests || io_interrupted)
			break;
	}

	if (dpolicy->type == DPOLICY_UMOUNT && issued) {
		__wait_all_discard_cmd(sbi, dpolicy);
		goto retry;
	}

	if (!issued && io_interrupted)
		issued = -1;

	return issued;
}
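
/*
 * Note the DPOLICY_UMOUNT behaviour above: after each issuing pass the
 * caller waits for the submitted commands and retries until nothing is
 * left, but every pass also checks UMOUNT_DISCARD_TIMEOUT, which bounds
 * how long umount can stall while draining discards.
 */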

static bool __drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *pend_list;
	struct discard_cmd *dc, *tmp;
	int i;
	bool dropped = false;

	mutex_lock(&dcc->cmd_lock);
	for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
		pend_list = &dcc->pend_list[i];
		list_for_each_entry_safe(dc, tmp, pend_list, list) {
			f2fs_bug_on(sbi, dc->state != D_PREP);
			__remove_discard_cmd(sbi, dc);
			dropped = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	return dropped;
}

void f2fs_drop_discard_cmd(struct f2fs_sb_info *sbi)
{
	__drop_discard_cmd(sbi);
}

static unsigned int __wait_one_discard_bio(struct f2fs_sb_info *sbi,
							struct discard_cmd *dc)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	unsigned int len = 0;

	wait_for_completion_io(&dc->wait);
	mutex_lock(&dcc->cmd_lock);
	f2fs_bug_on(sbi, dc->state != D_DONE);
	dc->ref--;
	if (!dc->ref) {
		if (!dc->error)
			len = dc->len;
		__remove_discard_cmd(sbi, dc);
	}
	mutex_unlock(&dcc->cmd_lock);

	return len;
}

static unsigned int __wait_discard_cmd_range(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy,
						block_t start, block_t end)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
					&(dcc->fstrim_list) : &(dcc->wait_list);
	struct discard_cmd *dc, *tmp;
	bool need_wait;
	unsigned int trimmed = 0;

next:
	need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	list_for_each_entry_safe(dc, tmp, wait_list, list) {
		if (dc->lstart + dc->len <= start || end <= dc->lstart)
			continue;
		if (dc->len < dpolicy->granularity)
			continue;
		if (dc->state == D_DONE && !dc->ref) {
			wait_for_completion_io(&dc->wait);
			if (!dc->error)
				trimmed += dc->len;
			__remove_discard_cmd(sbi, dc);
		} else {
			dc->ref++;
			need_wait = true;
			break;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait) {
		trimmed += __wait_one_discard_bio(sbi, dc);
		goto next;
	}

	return trimmed;
}

static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
						struct discard_policy *dpolicy)
{
	struct discard_policy dp;
	unsigned int discard_blks;

	if (dpolicy)
		return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);

	/* wait all */
	__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
	discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
	__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
	discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);

	return discard_blks;
}

/* This should be covered by global mutex, &sit_i->sentry_lock */
static void f2fs_wait_discard_bio(struct f2fs_sb_info *sbi, block_t blkaddr)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_cmd *dc;
	bool need_wait = false;

	mutex_lock(&dcc->cmd_lock);
	dc = (struct discard_cmd *)f2fs_lookup_rb_tree(&dcc->root,
							NULL, blkaddr);
	if (dc) {
		if (dc->state == D_PREP) {
			__punch_discard_cmd(sbi, dc, blkaddr);
		} else {
			dc->ref++;
			need_wait = true;
		}
	}
	mutex_unlock(&dcc->cmd_lock);

	if (need_wait)
		__wait_one_discard_bio(sbi, dc);
}

void f2fs_stop_discard_thread(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;

	if (dcc && dcc->f2fs_issue_discard) {
		struct task_struct *discard_thread = dcc->f2fs_issue_discard;

		dcc->f2fs_issue_discard = NULL;
		kthread_stop(discard_thread);
	}
}

/* This comes from f2fs_put_super */
bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi)
{
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct discard_policy dpolicy;
	bool dropped;

	__init_discard_policy(sbi, &dpolicy, DPOLICY_UMOUNT,
					dcc->discard_granularity);
	__issue_discard_cmd(sbi, &dpolicy);
	dropped = __drop_discard_cmd(sbi);

	/* just to make sure there are no pending discard commands */
	__wait_all_discard_cmd(sbi, NULL);

	f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
	return dropped;
}

static int issue_discard_thread(void *data)
{
	struct f2fs_sb_info *sbi = data;
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	wait_queue_head_t *q = &dcc->discard_wait_queue;
	struct discard_policy dpolicy;
	unsigned int wait_ms = DEF_MIN_DISCARD_ISSUE_TIME;
	int issued;

	set_freezable();

	do {
		__init_discard_policy(sbi, &dpolicy, DPOLICY_BG,
					dcc->discard_granularity);

		wait_event_interruptible_timeout(*q,
				kthread_should_stop() || freezing(current) ||
				dcc->discard_wake,
				msecs_to_jiffies(wait_ms));

		if (dcc->discard_wake)
			dcc->discard_wake = 0;

		/* clean up pending candidates before going to sleep */
		if (atomic_read(&dcc->queued_discard))
			__wait_all_discard_cmd(sbi, NULL);

		if (try_to_freeze())
			continue;
		if (f2fs_readonly(sbi->sb))
			continue;
		if (kthread_should_stop())
			return 0;
		if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
			wait_ms = dpolicy.max_interval;
			continue;
		}

		if (sbi->gc_mode == GC_URGENT)
			__init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1);

		sb_start_intwrite(sbi->sb);

		issued = __issue_discard_cmd(sbi, &dpolicy);
		if (issued > 0) {
			__wait_all_discard_cmd(sbi, &dpolicy);
			wait_ms = dpolicy.min_interval;
		} else if (issued == -1) {
			wait_ms = f2fs_time_to_wait(sbi, DISCARD_TIME);
			if (!wait_ms)
				wait_ms = dpolicy.mid_interval;
		} else {
			wait_ms = dpolicy.max_interval;
		}

		sb_end_intwrite(sbi->sb);

	} while (!kthread_should_stop());
	return 0;
}
1761
f46e8809 1762#ifdef CONFIG_BLK_DEV_ZONED
3c62be17
JK
1763static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1764 struct block_device *bdev, block_t blkstart, block_t blklen)
f46e8809 1765{
92592285 1766 sector_t sector, nr_sects;
10a875f8 1767 block_t lblkstart = blkstart;
3c62be17
JK
1768 int devi = 0;
1769
0916878d 1770 if (f2fs_is_multi_device(sbi)) {
3c62be17 1771 devi = f2fs_target_device_index(sbi, blkstart);
95175daf
DLM
1772 if (blkstart < FDEV(devi).start_blk ||
1773 blkstart > FDEV(devi).end_blk) {
dcbb4c10 1774 f2fs_err(sbi, "Invalid block %x", blkstart);
95175daf
DLM
1775 return -EIO;
1776 }
3c62be17
JK
1777 blkstart -= FDEV(devi).start_blk;
1778 }
f46e8809 1779
95175daf
DLM
1780 /* For sequential zones, reset the zone write pointer */
1781 if (f2fs_blkz_is_seq(sbi, devi, blkstart)) {
92592285
JK
1782 sector = SECTOR_FROM_BLOCK(blkstart);
1783 nr_sects = SECTOR_FROM_BLOCK(blklen);
1784
1785 if (sector & (bdev_zone_sectors(bdev) - 1) ||
1786 nr_sects != bdev_zone_sectors(bdev)) {
dcbb4c10
JP
1787 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1788 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1789 blkstart, blklen);
92592285
JK
1790 return -EIO;
1791 }
d50aaeec 1792 trace_f2fs_issue_reset_zone(bdev, blkstart);
6c1b1da5
AJ
1793 return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET,
1794 sector, nr_sects, GFP_NOFS);
f46e8809 1795 }
95175daf
DLM
1796
1797 /* For conventional zones, use regular discard if supported */
95175daf 1798 return __queue_discard_cmd(sbi, bdev, lblkstart, blklen);
f46e8809
DLM
1799}
1800#endif
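/*
 * A small sketch of the alignment test used above: with power-of-two zone
 * sizes, "sector & (bdev_zone_sectors(bdev) - 1)" is the remainder of the
 * sector modulo the zone size, so a non-zero result means the reset is not
 * zone-aligned. The numbers below are illustrative assumptions.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long zone_sectors = 524288;	/* 256 MiB of 512 B sectors */
	unsigned long long sectors[] = { 524288, 800000 };

	for (int i = 0; i < 2; i++)
		printf("sector %llu: %s\n", sectors[i],
		       (sectors[i] & (zone_sectors - 1)) ? "unaligned" : "aligned");
	return 0;
}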
1801
3c62be17
JK
1802static int __issue_discard_async(struct f2fs_sb_info *sbi,
1803 struct block_device *bdev, block_t blkstart, block_t blklen)
1804{
1805#ifdef CONFIG_BLK_DEV_ZONED
7f3d7719 1806 if (f2fs_sb_has_blkzoned(sbi) && bdev_is_zoned(bdev))
3c62be17
JK
1807 return __f2fs_issue_discard_zone(sbi, bdev, blkstart, blklen);
1808#endif
c81abe34 1809 return __queue_discard_cmd(sbi, bdev, blkstart, blklen);
3c62be17
JK
1810}
1811
1e87a78d 1812static int f2fs_issue_discard(struct f2fs_sb_info *sbi,
37208879
JK
1813 block_t blkstart, block_t blklen)
1814{
3c62be17
JK
1815 sector_t start = blkstart, len = 0;
1816 struct block_device *bdev;
a66cdd98
JK
1817 struct seg_entry *se;
1818 unsigned int offset;
1819 block_t i;
3c62be17
JK
1820 int err = 0;
1821
1822 bdev = f2fs_target_device(sbi, blkstart, NULL);
1823
1824 for (i = blkstart; i < blkstart + blklen; i++, len++) {
1825 if (i != start) {
1826 struct block_device *bdev2 =
1827 f2fs_target_device(sbi, i, NULL);
1828
1829 if (bdev2 != bdev) {
1830 err = __issue_discard_async(sbi, bdev,
1831 start, len);
1832 if (err)
1833 return err;
1834 bdev = bdev2;
1835 start = i;
1836 len = 0;
1837 }
1838 }
a66cdd98 1839
a66cdd98
JK
1840 se = get_seg_entry(sbi, GET_SEGNO(sbi, i));
1841 offset = GET_BLKOFF_FROM_SEG0(sbi, i);
1842
1843 if (!f2fs_test_and_set_bit(offset, se->discard_map))
1844 sbi->discard_blks--;
1845 }
f46e8809 1846
3c62be17
JK
1847 if (len)
1848 err = __issue_discard_async(sbi, bdev, start, len);
1849 return err;
1e87a78d
JK
1850}
1851
25290fa5
JK
1852static bool add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc,
1853 bool check_only)
adf4983b 1854{
b2955550
JK
1855 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
1856 int max_blocks = sbi->blocks_per_seg;
4b2fecc8 1857 struct seg_entry *se = get_seg_entry(sbi, cpc->trim_start);
b2955550
JK
1858 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
1859 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
a66cdd98 1860 unsigned long *discard_map = (unsigned long *)se->discard_map;
60a3b782 1861 unsigned long *dmap = SIT_I(sbi)->tmp_map;
b2955550 1862 unsigned int start = 0, end = -1;
c473f1a9 1863 bool force = (cpc->reason & CP_DISCARD);
a7eeb823 1864 struct discard_entry *de = NULL;
46f84c2c 1865 struct list_head *head = &SM_I(sbi)->dcc_info->entry_list;
b2955550
JK
1866 int i;
1867
7d20c8ab 1868 if (se->valid_blocks == max_blocks || !f2fs_hw_support_discard(sbi))
25290fa5 1869 return false;
b2955550 1870
a66cdd98 1871 if (!force) {
7d20c8ab 1872 if (!f2fs_realtime_discard_enable(sbi) || !se->valid_blocks ||
0b54fb84
JK
1873 SM_I(sbi)->dcc_info->nr_discards >=
1874 SM_I(sbi)->dcc_info->max_discards)
25290fa5 1875 return false;
4b2fecc8
JK
1876 }
1877
b2955550
JK
1878 /* SIT_VBLOCK_MAP_SIZE should be a multiple of sizeof(unsigned long) */
1879 for (i = 0; i < entries; i++)
a66cdd98 1880 dmap[i] = force ? ~ckpt_map[i] & ~discard_map[i] :
d7bc2484 1881 (cur_map[i] ^ ckpt_map[i]) & ckpt_map[i];
b2955550 1882
0b54fb84
JK
1883 while (force || SM_I(sbi)->dcc_info->nr_discards <=
1884 SM_I(sbi)->dcc_info->max_discards) {
b2955550
JK
1885 start = __find_rev_next_bit(dmap, max_blocks, end + 1);
1886 if (start >= max_blocks)
1887 break;
1888
1889 end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
c7b41e16
YH
1890 if (force && start && end != max_blocks
1891 && (end - start) < cpc->trim_minlen)
1892 continue;
1893
25290fa5
JK
1894 if (check_only)
1895 return true;
1896
a7eeb823
CY
1897 if (!de) {
1898 de = f2fs_kmem_cache_alloc(discard_entry_slab,
1899 GFP_F2FS_ZERO);
1900 de->start_blkaddr = START_BLOCK(sbi, cpc->trim_start);
1901 list_add_tail(&de->list, head);
1902 }
1903
1904 for (i = start; i < end; i++)
1905 __set_bit_le(i, (void *)de->discard_map);
1906
1907 SM_I(sbi)->dcc_info->nr_discards += end - start;
b2955550 1908 }
25290fa5 1909 return false;
b2955550
JK
1910}
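/*
 * A one-word sketch of the candidate math above: outside of FITRIM, a block
 * is a discard candidate when it was valid at the last checkpoint (ckpt_map)
 * but is no longer valid now (cur_map), hence (cur ^ ckpt) & ckpt. The
 * bitmap values are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned long cur  = 0xF0UL;	/* valid now */
	unsigned long ckpt = 0xFCUL;	/* valid at the last checkpoint */
	unsigned long dmap = (cur ^ ckpt) & ckpt;

	printf("discard candidates: 0x%02lx\n", dmap);	/* 0x0c: freed since CP */
	return 0;
}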
1911
af8ff65b
CY
1912static void release_discard_addr(struct discard_entry *entry)
1913{
1914 list_del(&entry->list);
1915 kmem_cache_free(discard_entry_slab, entry);
1916}
1917
4d57b86d 1918void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi)
4b2fecc8 1919{
46f84c2c 1920 struct list_head *head = &(SM_I(sbi)->dcc_info->entry_list);
4b2fecc8
JK
1921 struct discard_entry *entry, *this;
1922
1923 /* drop caches */
af8ff65b
CY
1924 list_for_each_entry_safe(entry, this, head, list)
1925 release_discard_addr(entry);
4b2fecc8
JK
1926}
1927
0a8165d7 1928/*
4d57b86d 1929 * Should call f2fs_clear_prefree_segments after checkpoint is done.
351df4b2
JK
1930 */
1931static void set_prefree_as_free_segments(struct f2fs_sb_info *sbi)
1932{
1933 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
b65ee148 1934 unsigned int segno;
351df4b2
JK
1935
1936 mutex_lock(&dirty_i->seglist_lock);
7cd8558b 1937 for_each_set_bit(segno, dirty_i->dirty_segmap[PRE], MAIN_SEGS(sbi))
351df4b2 1938 __set_test_and_free(sbi, segno);
351df4b2
JK
1939 mutex_unlock(&dirty_i->seglist_lock);
1940}
1941
4d57b86d
CY
1942void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
1943 struct cp_control *cpc)
351df4b2 1944{
969d1b18
CY
1945 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
1946 struct list_head *head = &dcc->entry_list;
2d7b822a 1947 struct discard_entry *entry, *this;
351df4b2 1948 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
29e59c14 1949 unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
29e59c14 1950 unsigned int start = 0, end = -1;
36abef4e 1951 unsigned int secno, start_segno;
c473f1a9 1952 bool force = (cpc->reason & CP_DISCARD);
b0332a0f 1953 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
351df4b2
JK
1954
1955 mutex_lock(&dirty_i->seglist_lock);
29e59c14 1956
351df4b2 1957 while (1) {
29e59c14 1958 int i;
ad6672bb
YS
1959
1960 if (need_align && end != -1)
1961 end--;
7cd8558b
JK
1962 start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
1963 if (start >= MAIN_SEGS(sbi))
351df4b2 1964 break;
7cd8558b
JK
1965 end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
1966 start + 1);
29e59c14 1967
ad6672bb
YS
1968 if (need_align) {
1969 start = rounddown(start, sbi->segs_per_sec);
1970 end = roundup(end, sbi->segs_per_sec);
1971 }
29e59c14 1972
ad6672bb
YS
1973 for (i = start; i < end; i++) {
1974 if (test_and_clear_bit(i, prefree_map))
1975 dirty_i->nr_dirty[PRE]--;
1976 }
29e59c14 1977
7d20c8ab 1978 if (!f2fs_realtime_discard_enable(sbi))
29e59c14 1979 continue;
351df4b2 1980
650d3c4e
YH
1981 if (force && start >= cpc->trim_start &&
1982 (end - 1) <= cpc->trim_end)
1983 continue;
1984
b0332a0f 1985 if (!f2fs_lfs_mode(sbi) || !__is_large_section(sbi)) {
36abef4e 1986 f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
37208879 1987 (end - start) << sbi->log_blocks_per_seg);
36abef4e
JK
1988 continue;
1989 }
1990next:
4ddb1a4d
JK
1991 secno = GET_SEC_FROM_SEG(sbi, start);
1992 start_segno = GET_SEG_FROM_SEC(sbi, secno);
36abef4e 1993 if (!IS_CURSEC(sbi, secno) &&
302bd348 1994 !get_valid_blocks(sbi, start, true))
36abef4e
JK
1995 f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
1996 sbi->segs_per_sec << sbi->log_blocks_per_seg);
1997
1998 start = start_segno + sbi->segs_per_sec;
1999 if (start < end)
2000 goto next;
8b107f5b
JK
2001 else
2002 end = start - 1;
351df4b2
JK
2003 }
2004 mutex_unlock(&dirty_i->seglist_lock);
b2955550
JK
2005
2006 /* send small discards */
2d7b822a 2007 list_for_each_entry_safe(entry, this, head, list) {
a7eeb823
CY
2008 unsigned int cur_pos = 0, next_pos, len, total_len = 0;
2009 bool is_valid = test_bit_le(0, entry->discard_map);
2010
2011find_next:
2012 if (is_valid) {
2013 next_pos = find_next_zero_bit_le(entry->discard_map,
2014 sbi->blocks_per_seg, cur_pos);
2015 len = next_pos - cur_pos;
2016
7beb01f7 2017 if (f2fs_sb_has_blkzoned(sbi) ||
acfd2810 2018 (force && len < cpc->trim_minlen))
a7eeb823
CY
2019 goto skip;
2020
2021 f2fs_issue_discard(sbi, entry->start_blkaddr + cur_pos,
2022 len);
a7eeb823
CY
2023 total_len += len;
2024 } else {
2025 next_pos = find_next_bit_le(entry->discard_map,
2026 sbi->blocks_per_seg, cur_pos);
2027 }
836b5a63 2028skip:
a7eeb823
CY
2029 cur_pos = next_pos;
2030 is_valid = !is_valid;
2031
2032 if (cur_pos < sbi->blocks_per_seg)
2033 goto find_next;
2034
af8ff65b 2035 release_discard_addr(entry);
969d1b18 2036 dcc->nr_discards -= total_len;
b2955550 2037 }
34e159da 2038
01983c71 2039 wake_up_discard_thread(sbi, false);
351df4b2
JK
2040}
2041
8ed59745 2042static int create_discard_cmd_control(struct f2fs_sb_info *sbi)
0b54fb84 2043{
15469963 2044 dev_t dev = sbi->sb->s_bdev->bd_dev;
0b54fb84 2045 struct discard_cmd_control *dcc;
ba48a33e 2046 int err = 0, i;
0b54fb84
JK
2047
2048 if (SM_I(sbi)->dcc_info) {
2049 dcc = SM_I(sbi)->dcc_info;
2050 goto init_thread;
2051 }
2052
acbf054d 2053 dcc = f2fs_kzalloc(sbi, sizeof(struct discard_cmd_control), GFP_KERNEL);
0b54fb84
JK
2054 if (!dcc)
2055 return -ENOMEM;
2056
969d1b18 2057 dcc->discard_granularity = DEFAULT_DISCARD_GRANULARITY;
46f84c2c 2058 INIT_LIST_HEAD(&dcc->entry_list);
78997b56 2059 for (i = 0; i < MAX_PLIST_NUM; i++)
ba48a33e 2060 INIT_LIST_HEAD(&dcc->pend_list[i]);
46f84c2c 2061 INIT_LIST_HEAD(&dcc->wait_list);
8412663d 2062 INIT_LIST_HEAD(&dcc->fstrim_list);
15469963 2063 mutex_init(&dcc->cmd_lock);
8b8dd65f 2064 atomic_set(&dcc->issued_discard, 0);
72691af6 2065 atomic_set(&dcc->queued_discard, 0);
5f32366a 2066 atomic_set(&dcc->discard_cmd_cnt, 0);
0b54fb84 2067 dcc->nr_discards = 0;
d618ebaf 2068 dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
d84d1cbd 2069 dcc->undiscard_blks = 0;
20ee4382 2070 dcc->next_pos = 0;
4dada3fd 2071 dcc->root = RB_ROOT_CACHED;
67fce70b 2072 dcc->rbtree_check = false;
0b54fb84 2073
15469963 2074 init_waitqueue_head(&dcc->discard_wait_queue);
0b54fb84
JK
2075 SM_I(sbi)->dcc_info = dcc;
2076init_thread:
15469963
JK
2077 dcc->f2fs_issue_discard = kthread_run(issue_discard_thread, sbi,
2078 "f2fs_discard-%u:%u", MAJOR(dev), MINOR(dev));
2079 if (IS_ERR(dcc->f2fs_issue_discard)) {
2080 err = PTR_ERR(dcc->f2fs_issue_discard);
5222595d 2081 kvfree(dcc);
15469963
JK
2082 SM_I(sbi)->dcc_info = NULL;
2083 return err;
2084 }
2085
0b54fb84
JK
2086 return err;
2087}
2088
f099405f 2089static void destroy_discard_cmd_control(struct f2fs_sb_info *sbi)
0b54fb84
JK
2090{
2091 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2092
f099405f
CY
2093 if (!dcc)
2094 return;
2095
4d57b86d 2096 f2fs_stop_discard_thread(sbi);
f099405f 2097
04f9287a
CY
2098 /*
2099 * Recovery can cache discard commands, so the error path of
2100 * fill_super() needs to give them a chance to be handled.
2101 */
2102 if (unlikely(atomic_read(&dcc->discard_cmd_cnt)))
2103 f2fs_issue_discard_timeout(sbi);
2104
5222595d 2105 kvfree(dcc);
f099405f 2106 SM_I(sbi)->dcc_info = NULL;
0b54fb84
JK
2107}
2108
184a5cd2 2109static bool __mark_sit_entry_dirty(struct f2fs_sb_info *sbi, unsigned int segno)
351df4b2
JK
2110{
2111 struct sit_info *sit_i = SIT_I(sbi);
184a5cd2
CY
2112
2113 if (!__test_and_set_bit(segno, sit_i->dirty_sentries_bitmap)) {
351df4b2 2114 sit_i->dirty_sentries++;
184a5cd2
CY
2115 return false;
2116 }
2117
2118 return true;
351df4b2
JK
2119}
2120
2121static void __set_sit_entry_type(struct f2fs_sb_info *sbi, int type,
2122 unsigned int segno, int modified)
2123{
2124 struct seg_entry *se = get_seg_entry(sbi, segno);
2125 se->type = type;
2126 if (modified)
2127 __mark_sit_entry_dirty(sbi, segno);
2128}
2129
2130static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2131{
2132 struct seg_entry *se;
2133 unsigned int segno, offset;
2134 long int new_vblocks;
6415fedc
YS
2135 bool exist;
2136#ifdef CONFIG_F2FS_CHECK_FS
2137 bool mir_exist;
2138#endif
351df4b2
JK
2139
2140 segno = GET_SEGNO(sbi, blkaddr);
2141
2142 se = get_seg_entry(sbi, segno);
2143 new_vblocks = se->valid_blocks + del;
491c0854 2144 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
351df4b2 2145
9850cf4a 2146 f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) ||
351df4b2
JK
2147 (new_vblocks > sbi->blocks_per_seg)));
2148
2149 se->valid_blocks = new_vblocks;
a1f72ac2
CY
2150 se->mtime = get_mtime(sbi, false);
2151 if (se->mtime > SIT_I(sbi)->max_mtime)
2152 SIT_I(sbi)->max_mtime = se->mtime;
351df4b2
JK
2153
2154 /* Update valid block bitmap */
2155 if (del > 0) {
6415fedc 2156 exist = f2fs_test_and_set_bit(offset, se->cur_valid_map);
355e7891 2157#ifdef CONFIG_F2FS_CHECK_FS
6415fedc
YS
2158 mir_exist = f2fs_test_and_set_bit(offset,
2159 se->cur_valid_map_mir);
2160 if (unlikely(exist != mir_exist)) {
dcbb4c10
JP
2161 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2162 blkaddr, exist);
05796763 2163 f2fs_bug_on(sbi, 1);
6415fedc 2164 }
355e7891 2165#endif
6415fedc 2166 if (unlikely(exist)) {
dcbb4c10
JP
2167 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2168 blkaddr);
6415fedc 2169 f2fs_bug_on(sbi, 1);
35ee82ca
YS
2170 se->valid_blocks--;
2171 del = 0;
355e7891 2172 }
6415fedc 2173
7d20c8ab 2174 if (!f2fs_test_and_set_bit(offset, se->discard_map))
a66cdd98 2175 sbi->discard_blks--;
720037f9 2176
899fee36
CY
2177 /*
2178 * SSR should never reuse block which is checkpointed
2179 * or newly invalidated.
2180 */
2181 if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED)) {
720037f9
JK
2182 if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map))
2183 se->ckpt_valid_blocks++;
2184 }
351df4b2 2185 } else {
6415fedc 2186 exist = f2fs_test_and_clear_bit(offset, se->cur_valid_map);
355e7891 2187#ifdef CONFIG_F2FS_CHECK_FS
6415fedc
YS
2188 mir_exist = f2fs_test_and_clear_bit(offset,
2189 se->cur_valid_map_mir);
2190 if (unlikely(exist != mir_exist)) {
dcbb4c10
JP
2191 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2192 blkaddr, exist);
05796763 2193 f2fs_bug_on(sbi, 1);
6415fedc 2194 }
355e7891 2195#endif
6415fedc 2196 if (unlikely(!exist)) {
dcbb4c10
JP
2197 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2198 blkaddr);
6415fedc 2199 f2fs_bug_on(sbi, 1);
35ee82ca
YS
2200 se->valid_blocks++;
2201 del = 0;
4354994f
DR
2202 } else if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2203 /*
2204 * If checkpoints are off, we must not reuse data that
2205 * was used in the previous checkpoint. If it was used
2206 * before, we must track that to know how much space we
2207 * really have.
2208 */
c9c8ed50
CY
2209 if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
2210 spin_lock(&sbi->stat_lock);
4354994f 2211 sbi->unusable_block_count++;
c9c8ed50
CY
2212 spin_unlock(&sbi->stat_lock);
2213 }
355e7891 2214 }
6415fedc 2215
7d20c8ab 2216 if (f2fs_test_and_clear_bit(offset, se->discard_map))
a66cdd98 2217 sbi->discard_blks++;
351df4b2
JK
2218 }
2219 if (!f2fs_test_bit(offset, se->ckpt_valid_map))
2220 se->ckpt_valid_blocks += del;
2221
2222 __mark_sit_entry_dirty(sbi, segno);
2223
2224 /* update total number of valid blocks to be written in ckpt area */
2225 SIT_I(sbi)->written_valid_blocks += del;
2226
2c70c5e3 2227 if (__is_large_section(sbi))
351df4b2
JK
2228 get_sec_entry(sbi, segno)->valid_blocks += del;
2229}
2230
4d57b86d 2231void f2fs_invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr)
351df4b2
JK
2232{
2233 unsigned int segno = GET_SEGNO(sbi, addr);
2234 struct sit_info *sit_i = SIT_I(sbi);
2235
9850cf4a 2236 f2fs_bug_on(sbi, addr == NULL_ADDR);
4c8ff709 2237 if (addr == NEW_ADDR || addr == COMPRESS_ADDR)
351df4b2
JK
2238 return;
2239
6aa58d8a
CY
2240 invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
2241
351df4b2 2242 /* add it into sit main buffer */
3d26fa6b 2243 down_write(&sit_i->sentry_lock);
351df4b2
JK
2244
2245 update_sit_entry(sbi, addr, -1);
2246
2247 /* add it into dirty seglist */
2248 locate_dirty_segment(sbi, segno);
2249
3d26fa6b 2250 up_write(&sit_i->sentry_lock);
351df4b2
JK
2251}
2252
4d57b86d 2253bool f2fs_is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr)
6e2c64ad
JK
2254{
2255 struct sit_info *sit_i = SIT_I(sbi);
2256 unsigned int segno, offset;
2257 struct seg_entry *se;
2258 bool is_cp = false;
2259
93770ab7 2260 if (!__is_valid_data_blkaddr(blkaddr))
6e2c64ad
JK
2261 return true;
2262
3d26fa6b 2263 down_read(&sit_i->sentry_lock);
6e2c64ad
JK
2264
2265 segno = GET_SEGNO(sbi, blkaddr);
2266 se = get_seg_entry(sbi, segno);
2267 offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
2268
2269 if (f2fs_test_bit(offset, se->ckpt_valid_map))
2270 is_cp = true;
2271
3d26fa6b 2272 up_read(&sit_i->sentry_lock);
6e2c64ad
JK
2273
2274 return is_cp;
2275}
2276
0a8165d7 2277/*
351df4b2
JK
2278 * This function must be called with the curseg_mutex lock held
2279 */
2280static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
e79efe3b 2281 struct f2fs_summary *sum)
351df4b2
JK
2282{
2283 struct curseg_info *curseg = CURSEG_I(sbi, type);
2284 void *addr = curseg->sum_blk;
e79efe3b 2285 addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
351df4b2 2286 memcpy(addr, sum, sizeof(struct f2fs_summary));
351df4b2
JK
2287}
2288
0a8165d7 2289/*
351df4b2
JK
2290 * Calculate the number of current summary pages for writing
2291 */
4d57b86d 2292int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra)
351df4b2 2293{
351df4b2 2294 int valid_sum_count = 0;
9a47938b 2295 int i, sum_in_page;
351df4b2
JK
2296
2297 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
2298 if (sbi->ckpt->alloc_type[i] == SSR)
2299 valid_sum_count += sbi->blocks_per_seg;
3fa06d7b
CY
2300 else {
2301 if (for_ra)
2302 valid_sum_count += le16_to_cpu(
2303 F2FS_CKPT(sbi)->cur_data_blkoff[i]);
2304 else
2305 valid_sum_count += curseg_blkoff(sbi, i);
2306 }
351df4b2
JK
2307 }
2308
09cbfeaf 2309 sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE -
9a47938b
FL
2310 SUM_FOOTER_SIZE) / SUMMARY_SIZE;
2311 if (valid_sum_count <= sum_in_page)
351df4b2 2312 return 1;
9a47938b 2313 else if ((valid_sum_count - sum_in_page) <=
09cbfeaf 2314 (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE)
351df4b2
JK
2315 return 2;
2316 return 3;
2317}
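/*
 * A worked sketch of the thresholds above, assuming 4 KiB pages and the
 * usual on-disk sizes (SUMMARY_SIZE = 7, SUM_FOOTER_SIZE = 5,
 * SUM_JOURNAL_SIZE = 507); treat those constants as assumptions here.
 */
#include <stdio.h>

int main(void)
{
	int page = 4096, journal = 507, footer = 5, summary = 7;
	int first = (page - 2 * journal - footer) / summary;
	int rest = (page - footer) / summary;

	/* 439 compacted entries fit beside both journals, 584 thereafter */
	printf("first page: %d entries, later pages: %d entries\n",
	       first, rest);
	return 0;
}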
2318
0a8165d7 2319/*
351df4b2
JK
2320 * Caller should put this summary page
2321 */
4d57b86d 2322struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
351df4b2 2323{
7735730d 2324 return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
351df4b2
JK
2325}
2326
4d57b86d
CY
2327void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
2328 void *src, block_t blk_addr)
351df4b2 2329{
4d57b86d 2330 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
381722d2 2331
0537b811 2332 memcpy(page_address(page), src, PAGE_SIZE);
351df4b2
JK
2333 set_page_dirty(page);
2334 f2fs_put_page(page, 1);
2335}
2336
381722d2
CY
2337static void write_sum_page(struct f2fs_sb_info *sbi,
2338 struct f2fs_summary_block *sum_blk, block_t blk_addr)
2339{
4d57b86d 2340 f2fs_update_meta_page(sbi, (void *)sum_blk, blk_addr);
381722d2
CY
2341}
2342
b7ad7512
CY
2343static void write_current_sum_page(struct f2fs_sb_info *sbi,
2344 int type, block_t blk_addr)
2345{
2346 struct curseg_info *curseg = CURSEG_I(sbi, type);
4d57b86d 2347 struct page *page = f2fs_grab_meta_page(sbi, blk_addr);
b7ad7512
CY
2348 struct f2fs_summary_block *src = curseg->sum_blk;
2349 struct f2fs_summary_block *dst;
2350
2351 dst = (struct f2fs_summary_block *)page_address(page);
81114baa 2352 memset(dst, 0, PAGE_SIZE);
b7ad7512
CY
2353
2354 mutex_lock(&curseg->curseg_mutex);
2355
2356 down_read(&curseg->journal_rwsem);
2357 memcpy(&dst->journal, curseg->journal, SUM_JOURNAL_SIZE);
2358 up_read(&curseg->journal_rwsem);
2359
2360 memcpy(dst->entries, src->entries, SUM_ENTRY_SIZE);
2361 memcpy(&dst->footer, &src->footer, SUM_FOOTER_SIZE);
2362
2363 mutex_unlock(&curseg->curseg_mutex);
2364
2365 set_page_dirty(page);
2366 f2fs_put_page(page, 1);
2367}
2368
a7881893
JK
2369static int is_next_segment_free(struct f2fs_sb_info *sbi, int type)
2370{
2371 struct curseg_info *curseg = CURSEG_I(sbi, type);
2372 unsigned int segno = curseg->segno + 1;
2373 struct free_segmap_info *free_i = FREE_I(sbi);
2374
2375 if (segno < MAIN_SEGS(sbi) && segno % sbi->segs_per_sec)
2376 return !test_bit(segno, free_i->free_segmap);
2377 return 0;
2378}
2379
0a8165d7 2380/*
351df4b2
JK
2381 * Find a new segment from the free segments bitmap in the right order.
2382 * This function must always succeed; otherwise it is a BUG.
2383 */
2384static void get_new_segment(struct f2fs_sb_info *sbi,
2385 unsigned int *newseg, bool new_sec, int dir)
2386{
2387 struct free_segmap_info *free_i = FREE_I(sbi);
351df4b2 2388 unsigned int segno, secno, zoneno;
7cd8558b 2389 unsigned int total_zones = MAIN_SECS(sbi) / sbi->secs_per_zone;
4ddb1a4d
JK
2390 unsigned int hint = GET_SEC_FROM_SEG(sbi, *newseg);
2391 unsigned int old_zoneno = GET_ZONE_FROM_SEG(sbi, *newseg);
351df4b2
JK
2392 unsigned int left_start = hint;
2393 bool init = true;
2394 int go_left = 0;
2395 int i;
2396
1a118ccf 2397 spin_lock(&free_i->segmap_lock);
351df4b2
JK
2398
2399 if (!new_sec && ((*newseg + 1) % sbi->segs_per_sec)) {
2400 segno = find_next_zero_bit(free_i->free_segmap,
4ddb1a4d
JK
2401 GET_SEG_FROM_SEC(sbi, hint + 1), *newseg + 1);
2402 if (segno < GET_SEG_FROM_SEC(sbi, hint + 1))
351df4b2
JK
2403 goto got_it;
2404 }
2405find_other_zone:
7cd8558b
JK
2406 secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
2407 if (secno >= MAIN_SECS(sbi)) {
351df4b2
JK
2408 if (dir == ALLOC_RIGHT) {
2409 secno = find_next_zero_bit(free_i->free_secmap,
7cd8558b
JK
2410 MAIN_SECS(sbi), 0);
2411 f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
351df4b2
JK
2412 } else {
2413 go_left = 1;
2414 left_start = hint - 1;
2415 }
2416 }
2417 if (go_left == 0)
2418 goto skip_left;
2419
2420 while (test_bit(left_start, free_i->free_secmap)) {
2421 if (left_start > 0) {
2422 left_start--;
2423 continue;
2424 }
2425 left_start = find_next_zero_bit(free_i->free_secmap,
7cd8558b
JK
2426 MAIN_SECS(sbi), 0);
2427 f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
351df4b2
JK
2428 break;
2429 }
2430 secno = left_start;
2431skip_left:
4ddb1a4d
JK
2432 segno = GET_SEG_FROM_SEC(sbi, secno);
2433 zoneno = GET_ZONE_FROM_SEC(sbi, secno);
351df4b2
JK
2434
2435 /* give up on finding another zone */
2436 if (!init)
2437 goto got_it;
2438 if (sbi->secs_per_zone == 1)
2439 goto got_it;
2440 if (zoneno == old_zoneno)
2441 goto got_it;
2442 if (dir == ALLOC_LEFT) {
2443 if (!go_left && zoneno + 1 >= total_zones)
2444 goto got_it;
2445 if (go_left && zoneno == 0)
2446 goto got_it;
2447 }
2448 for (i = 0; i < NR_CURSEG_TYPE; i++)
2449 if (CURSEG_I(sbi, i)->zone == zoneno)
2450 break;
2451
2452 if (i < NR_CURSEG_TYPE) {
2453 /* zone is in use, try another */
2454 if (go_left)
2455 hint = zoneno * sbi->secs_per_zone - 1;
2456 else if (zoneno + 1 >= total_zones)
2457 hint = 0;
2458 else
2459 hint = (zoneno + 1) * sbi->secs_per_zone;
2460 init = false;
2461 goto find_other_zone;
2462 }
2463got_it:
2464 /* set it as dirty segment in free segmap */
9850cf4a 2465 f2fs_bug_on(sbi, test_bit(segno, free_i->free_segmap));
351df4b2
JK
2466 __set_inuse(sbi, segno);
2467 *newseg = segno;
1a118ccf 2468 spin_unlock(&free_i->segmap_lock);
351df4b2
JK
2469}
2470
2471static void reset_curseg(struct f2fs_sb_info *sbi, int type, int modified)
2472{
2473 struct curseg_info *curseg = CURSEG_I(sbi, type);
2474 struct summary_footer *sum_footer;
2475
2476 curseg->segno = curseg->next_segno;
4ddb1a4d 2477 curseg->zone = GET_ZONE_FROM_SEG(sbi, curseg->segno);
351df4b2
JK
2478 curseg->next_blkoff = 0;
2479 curseg->next_segno = NULL_SEGNO;
2480
2481 sum_footer = &(curseg->sum_blk->footer);
2482 memset(sum_footer, 0, sizeof(struct summary_footer));
2483 if (IS_DATASEG(type))
2484 SET_SUM_TYPE(sum_footer, SUM_TYPE_DATA);
2485 if (IS_NODESEG(type))
2486 SET_SUM_TYPE(sum_footer, SUM_TYPE_NODE);
2487 __set_sit_entry_type(sbi, type, curseg->segno, modified);
2488}
2489
7a20b8a6
JK
2490static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
2491{
a7881893 2492 /* if segs_per_sec is larger than 1, we need to keep the original policy. */
2c70c5e3 2493 if (__is_large_section(sbi))
a7881893
JK
2494 return CURSEG_I(sbi, type)->segno;
2495
4354994f
DR
2496 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
2497 return 0;
2498
b94929d9
YS
2499 if (test_opt(sbi, NOHEAP) &&
2500 (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
7a20b8a6
JK
2501 return 0;
2502
e066b83c
JK
2503 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
2504 return SIT_I(sbi)->last_victim[ALLOC_NEXT];
07939627
JK
2505
2506 /* find segments from 0 to reuse freed segments */
63189b78 2507 if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE)
07939627
JK
2508 return 0;
2509
7a20b8a6
JK
2510 return CURSEG_I(sbi, type)->segno;
2511}
2512
0a8165d7 2513/*
351df4b2
JK
2514 * Allocate a current working segment.
2515 * This function always allocates a free segment in LFS manner.
2516 */
2517static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
2518{
2519 struct curseg_info *curseg = CURSEG_I(sbi, type);
2520 unsigned int segno = curseg->segno;
2521 int dir = ALLOC_LEFT;
2522
2523 write_sum_page(sbi, curseg->sum_blk,
81fb5e87 2524 GET_SUM_BLOCK(sbi, segno));
351df4b2
JK
2525 if (type == CURSEG_WARM_DATA || type == CURSEG_COLD_DATA)
2526 dir = ALLOC_RIGHT;
2527
2528 if (test_opt(sbi, NOHEAP))
2529 dir = ALLOC_RIGHT;
2530
7a20b8a6 2531 segno = __get_next_segno(sbi, type);
351df4b2
JK
2532 get_new_segment(sbi, &segno, new_sec, dir);
2533 curseg->next_segno = segno;
2534 reset_curseg(sbi, type, 1);
2535 curseg->alloc_type = LFS;
2536}
2537
2538static void __next_free_blkoff(struct f2fs_sb_info *sbi,
2539 struct curseg_info *seg, block_t start)
2540{
2541 struct seg_entry *se = get_seg_entry(sbi, seg->segno);
e81c93cf 2542 int entries = SIT_VBLOCK_MAP_SIZE / sizeof(unsigned long);
60a3b782 2543 unsigned long *target_map = SIT_I(sbi)->tmp_map;
e81c93cf
CL
2544 unsigned long *ckpt_map = (unsigned long *)se->ckpt_valid_map;
2545 unsigned long *cur_map = (unsigned long *)se->cur_valid_map;
2546 int i, pos;
2547
2548 for (i = 0; i < entries; i++)
2549 target_map[i] = ckpt_map[i] | cur_map[i];
2550
2551 pos = __find_rev_next_zero_bit(target_map, sbi->blocks_per_seg, start);
2552
2553 seg->next_blkoff = pos;
351df4b2
JK
2554}
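/*
 * A sketch of the SSR search above on one word: a block can be reused only
 * if it is free in BOTH bitmaps, so the scan runs over their union. This
 * demo scans LSB-first, whereas f2fs scans its reversed (MSB-first) bit
 * order via __find_rev_next_zero_bit.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ckpt = 0x2FUL;	/* valid at the last checkpoint */
	unsigned long cur  = 0x37UL;	/* valid now */
	unsigned long target = ckpt | cur;
	int pos = 0;

	while (target & (1UL << pos))
		pos++;
	printf("next free offset: %d\n", pos);	/* 6: 0x3f occupies bits 0-5 */
	return 0;
}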
2555
0a8165d7 2556/*
351df4b2
JK
2557 * If a segment is written in LFS manner, the next block offset is simply
2558 * obtained by increasing the current block offset. However, if a segment is
2559 * written in SSR manner, the next block offset is obtained via __next_free_blkoff.
2560 */
2561static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
2562 struct curseg_info *seg)
2563{
2564 if (seg->alloc_type == SSR)
2565 __next_free_blkoff(sbi, seg, seg->next_blkoff + 1);
2566 else
2567 seg->next_blkoff++;
2568}
2569
0a8165d7 2570/*
e1c42045 2571 * This function always allocates a used segment (from the dirty seglist) in SSR
351df4b2
JK
2572 * manner, so it must recover the existing segment information of valid blocks.
2573 */
025d63a4 2574static void change_curseg(struct f2fs_sb_info *sbi, int type)
351df4b2
JK
2575{
2576 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
2577 struct curseg_info *curseg = CURSEG_I(sbi, type);
2578 unsigned int new_segno = curseg->next_segno;
2579 struct f2fs_summary_block *sum_node;
2580 struct page *sum_page;
2581
2582 write_sum_page(sbi, curseg->sum_blk,
2583 GET_SUM_BLOCK(sbi, curseg->segno));
2584 __set_test_and_inuse(sbi, new_segno);
2585
2586 mutex_lock(&dirty_i->seglist_lock);
2587 __remove_dirty_segment(sbi, new_segno, PRE);
2588 __remove_dirty_segment(sbi, new_segno, DIRTY);
2589 mutex_unlock(&dirty_i->seglist_lock);
2590
2591 reset_curseg(sbi, type, 1);
2592 curseg->alloc_type = SSR;
2593 __next_free_blkoff(sbi, curseg, 0);
2594
4d57b86d 2595 sum_page = f2fs_get_sum_page(sbi, new_segno);
edc55aaf 2596 f2fs_bug_on(sbi, IS_ERR(sum_page));
025d63a4
CY
2597 sum_node = (struct f2fs_summary_block *)page_address(sum_page);
2598 memcpy(curseg->sum_blk, sum_node, SUM_ENTRY_SIZE);
2599 f2fs_put_page(sum_page, 1);
351df4b2
JK
2600}
2601
43727527
JK
2602static int get_ssr_segment(struct f2fs_sb_info *sbi, int type)
2603{
2604 struct curseg_info *curseg = CURSEG_I(sbi, type);
2605 const struct victim_selection *v_ops = DIRTY_I(sbi)->v_ops;
e066b83c 2606 unsigned segno = NULL_SEGNO;
d27c3d89
CY
2607 int i, cnt;
2608 bool reversed = false;
c192f7a4 2609
4d57b86d 2610 /* f2fs_need_SSR() already forces to do this */
e066b83c
JK
2611 if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) {
2612 curseg->next_segno = segno;
c192f7a4 2613 return 1;
e066b83c 2614 }
43727527 2615
70d625cb
JK
2616 /* For node segments, let's do SSR more intensively */
2617 if (IS_NODESEG(type)) {
d27c3d89
CY
2618 if (type >= CURSEG_WARM_NODE) {
2619 reversed = true;
2620 i = CURSEG_COLD_NODE;
2621 } else {
2622 i = CURSEG_HOT_NODE;
2623 }
2624 cnt = NR_CURSEG_NODE_TYPE;
70d625cb 2625 } else {
d27c3d89
CY
2626 if (type >= CURSEG_WARM_DATA) {
2627 reversed = true;
2628 i = CURSEG_COLD_DATA;
2629 } else {
2630 i = CURSEG_HOT_DATA;
2631 }
2632 cnt = NR_CURSEG_DATA_TYPE;
70d625cb 2633 }
43727527 2634
d27c3d89 2635 for (; cnt-- > 0; reversed ? i-- : i++) {
c192f7a4
JK
2636 if (i == type)
2637 continue;
e066b83c
JK
2638 if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) {
2639 curseg->next_segno = segno;
43727527 2640 return 1;
e066b83c 2641 }
c192f7a4 2642 }
4354994f
DR
2643
2644 /* find valid_blocks=0 in dirty list */
2645 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2646 segno = get_free_segment(sbi);
2647 if (segno != NULL_SEGNO) {
2648 curseg->next_segno = segno;
2649 return 1;
2650 }
2651 }
43727527
JK
2652 return 0;
2653}
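/*
 * A sketch of the victim-search order above: start from the hot or cold
 * end of the requested class and walk toward the other end, skipping the
 * type already tried. Purely illustrative; names are local to this demo.
 */
#include <stdio.h>

int main(void)
{
	const char *name[] = { "HOT", "WARM", "COLD" };
	int type = 1;				/* ask for WARM data */
	int reversed = (type >= 1);		/* WARM or colder: walk down */
	int i = reversed ? 2 : 0;

	for (int cnt = 3; cnt-- > 0; reversed ? i-- : i++) {
		if (i == type)
			continue;
		printf("try %s\n", name[i]);	/* COLD, then HOT */
	}
	return 0;
}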
2654
351df4b2
JK
2655/*
2656 * Flush out the current segment and replace it with a new segment.
2657 * This function must always succeed; otherwise it is a BUG.
2658 */
2659static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2660 int type, bool force)
2661{
a7881893
JK
2662 struct curseg_info *curseg = CURSEG_I(sbi, type);
2663
7b405275 2664 if (force)
351df4b2 2665 new_curseg(sbi, type, true);
5b6c6be2
JK
2666 else if (!is_set_ckpt_flags(sbi, CP_CRC_RECOVERY_FLAG) &&
2667 type == CURSEG_WARM_NODE)
351df4b2 2668 new_curseg(sbi, type, false);
4354994f
DR
2669 else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type) &&
2670 likely(!is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
a7881893 2671 new_curseg(sbi, type, false);
4d57b86d 2672 else if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
025d63a4 2673 change_curseg(sbi, type);
351df4b2
JK
2674 else
2675 new_curseg(sbi, type, false);
dcdfff65 2676
a7881893 2677 stat_inc_seg_type(sbi, curseg);
351df4b2
JK
2678}
2679
04f0b2ea
QS
2680void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
2681 unsigned int start, unsigned int end)
2682{
2683 struct curseg_info *curseg = CURSEG_I(sbi, type);
2684 unsigned int segno;
2685
2686 down_read(&SM_I(sbi)->curseg_lock);
2687 mutex_lock(&curseg->curseg_mutex);
2688 down_write(&SIT_I(sbi)->sentry_lock);
2689
2690 segno = CURSEG_I(sbi, type)->segno;
2691 if (segno < start || segno > end)
2692 goto unlock;
2693
2694 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
2695 change_curseg(sbi, type);
2696 else
2697 new_curseg(sbi, type, true);
2698
2699 stat_inc_seg_type(sbi, curseg);
2700
2701 locate_dirty_segment(sbi, segno);
2702unlock:
2703 up_write(&SIT_I(sbi)->sentry_lock);
2704
2705 if (segno != curseg->segno)
dcbb4c10
JP
2706 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
2707 type, segno, curseg->segno);
04f0b2ea
QS
2708
2709 mutex_unlock(&curseg->curseg_mutex);
2710 up_read(&SM_I(sbi)->curseg_lock);
2711}
2712
f5a53edc 2713void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type)
351df4b2 2714{
6ae1be13
JK
2715 struct curseg_info *curseg;
2716 unsigned int old_segno;
351df4b2
JK
2717 int i;
2718
3d26fa6b
CY
2719 down_write(&SIT_I(sbi)->sentry_lock);
2720
6ae1be13 2721 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
f5a53edc
JK
2722 if (type != NO_CHECK_TYPE && i != type)
2723 continue;
2724
6ae1be13 2725 curseg = CURSEG_I(sbi, i);
f5a53edc
JK
2726 if (type == NO_CHECK_TYPE || curseg->next_blkoff ||
2727 get_valid_blocks(sbi, curseg->segno, false) ||
2728 get_ckpt_valid_blocks(sbi, curseg->segno)) {
2729 old_segno = curseg->segno;
2730 SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true);
2731 locate_dirty_segment(sbi, old_segno);
2732 }
6ae1be13 2733 }
3d26fa6b
CY
2734
2735 up_write(&SIT_I(sbi)->sentry_lock);
351df4b2
JK
2736}
2737
2738static const struct segment_allocation default_salloc_ops = {
2739 .allocate_segment = allocate_segment_by_default,
2740};
2741
4d57b86d
CY
2742bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
2743 struct cp_control *cpc)
25290fa5
JK
2744{
2745 __u64 trim_start = cpc->trim_start;
2746 bool has_candidate = false;
2747
3d26fa6b 2748 down_write(&SIT_I(sbi)->sentry_lock);
25290fa5
JK
2749 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++) {
2750 if (add_discard_addrs(sbi, cpc, true)) {
2751 has_candidate = true;
2752 break;
2753 }
2754 }
3d26fa6b 2755 up_write(&SIT_I(sbi)->sentry_lock);
25290fa5
JK
2756
2757 cpc->trim_start = trim_start;
2758 return has_candidate;
2759}
2760
01f9cf6d 2761static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
9a997188
JK
2762 struct discard_policy *dpolicy,
2763 unsigned int start, unsigned int end)
2764{
2765 struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
2766 struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
2767 struct rb_node **insert_p = NULL, *insert_parent = NULL;
2768 struct discard_cmd *dc;
2769 struct blk_plug plug;
2770 int issued;
01f9cf6d 2771 unsigned int trimmed = 0;
9a997188
JK
2772
2773next:
2774 issued = 0;
2775
2776 mutex_lock(&dcc->cmd_lock);
67fce70b
CY
2777 if (unlikely(dcc->rbtree_check))
2778 f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
2779 &dcc->root));
9a997188 2780
4d57b86d 2781 dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
9a997188
JK
2782 NULL, start,
2783 (struct rb_entry **)&prev_dc,
2784 (struct rb_entry **)&next_dc,
4dada3fd 2785 &insert_p, &insert_parent, true, NULL);
9a997188
JK
2786 if (!dc)
2787 dc = next_dc;
2788
2789 blk_start_plug(&plug);
2790
2791 while (dc && dc->lstart <= end) {
2792 struct rb_node *node;
6b9cb124 2793 int err = 0;
9a997188
JK
2794
2795 if (dc->len < dpolicy->granularity)
2796 goto skip;
2797
2798 if (dc->state != D_PREP) {
2799 list_move_tail(&dc->list, &dcc->fstrim_list);
2800 goto skip;
2801 }
2802
6b9cb124 2803 err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
9a997188 2804
35ec7d57 2805 if (issued >= dpolicy->max_requests) {
9a997188
JK
2806 start = dc->lstart + dc->len;
2807
6b9cb124
CY
2808 if (err)
2809 __remove_discard_cmd(sbi, dc);
2810
9a997188
JK
2811 blk_finish_plug(&plug);
2812 mutex_unlock(&dcc->cmd_lock);
01f9cf6d 2813 trimmed += __wait_all_discard_cmd(sbi, NULL);
5df7731f 2814 congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
9a997188
JK
2815 goto next;
2816 }
2817skip:
2818 node = rb_next(&dc->rb_node);
6b9cb124
CY
2819 if (err)
2820 __remove_discard_cmd(sbi, dc);
9a997188
JK
2821 dc = rb_entry_safe(node, struct discard_cmd, rb_node);
2822
2823 if (fatal_signal_pending(current))
2824 break;
2825 }
2826
2827 blk_finish_plug(&plug);
2828 mutex_unlock(&dcc->cmd_lock);
01f9cf6d
CY
2829
2830 return trimmed;
9a997188
JK
2831}
2832
4b2fecc8
JK
2833int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2834{
f7ef9b83
JK
2835 __u64 start = F2FS_BYTES_TO_BLK(range->start);
2836 __u64 end = start + F2FS_BYTES_TO_BLK(range->len) - 1;
377224c4 2837 unsigned int start_segno, end_segno;
8412663d 2838 block_t start_block, end_block;
4b2fecc8 2839 struct cp_control cpc;
78997b56 2840 struct discard_policy dpolicy;
0ea80512 2841 unsigned long long trimmed = 0;
c34f42e2 2842 int err = 0;
b0332a0f 2843 bool need_align = f2fs_lfs_mode(sbi) && __is_large_section(sbi);
4b2fecc8 2844
836b5a63 2845 if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
4b2fecc8
JK
2846 return -EINVAL;
2847
3f16ecd9
CY
2848 if (end < MAIN_BLKADDR(sbi))
2849 goto out;
4b2fecc8 2850
ed214a11 2851 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
dcbb4c10 2852 f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
10f966bb 2853 return -EFSCORRUPTED;
ed214a11
YH
2854 }
2855
4b2fecc8 2856 /* start/end segment number in main_area */
7cd8558b
JK
2857 start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
2858 end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
2859 GET_SEGNO(sbi, end);
ad6672bb
YS
2860 if (need_align) {
2861 start_segno = rounddown(start_segno, sbi->segs_per_sec);
2862 end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
2863 }
8412663d 2864
4b2fecc8 2865 cpc.reason = CP_DISCARD;
836b5a63 2866 cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
377224c4
CY
2867 cpc.trim_start = start_segno;
2868 cpc.trim_end = end_segno;
4b2fecc8 2869
377224c4
CY
2870 if (sbi->discard_blks == 0)
2871 goto out;
74fa5f3d 2872
fb24fea7 2873 down_write(&sbi->gc_lock);
4d57b86d 2874 err = f2fs_write_checkpoint(sbi, &cpc);
fb24fea7 2875 up_write(&sbi->gc_lock);
377224c4
CY
2876 if (err)
2877 goto out;
8412663d 2878
e555da9f
JK
2879 /*
2880 * We filed discard candidates, but actually we don't need to wait for
2881 * all of them, since they'll be issued in idle time along with runtime
2882 * discard option. The user configuration is expected to rely on either
2883 * runtime discard or periodic fstrim instead.
2884 */
7d20c8ab 2885 if (f2fs_realtime_discard_enable(sbi))
5a615492
JK
2886 goto out;
2887
2888 start_block = START_BLOCK(sbi, start_segno);
2889 end_block = START_BLOCK(sbi, end_segno + 1);
2890
2891 __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
01f9cf6d
CY
2892 trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
2893 start_block, end_block);
5a615492 2894
01f9cf6d 2895 trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
0ea80512 2896 start_block, end_block);
377224c4 2897out:
6eae2694
CY
2898 if (!err)
2899 range->len = F2FS_BLK_TO_BYTES(trimmed);
c34f42e2 2900 return err;
4b2fecc8
JK
2901}
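/*
 * How userspace typically reaches f2fs_trim_fs(): through the generic
 * FITRIM ioctl. A minimal sketch; the mount point is an assumption and
 * error handling is reduced to the essentials.
 */
#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct fstrim_range range = {
		.start = 0,
		.len = ~0ULL,	/* whole filesystem */
		.minlen = 0,	/* rounded up to trim_minlen internally */
	};
	int fd = open("/mnt/f2fs", O_RDONLY);

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}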
2902
351df4b2
JK
2903static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
2904{
2905 struct curseg_info *curseg = CURSEG_I(sbi, type);
2906 if (curseg->next_blkoff < sbi->blocks_per_seg)
2907 return true;
2908 return false;
2909}
2910
4d57b86d 2911int f2fs_rw_hint_to_seg_type(enum rw_hint hint)
4f0a03d3
HL
2912{
2913 switch (hint) {
2914 case WRITE_LIFE_SHORT:
2915 return CURSEG_HOT_DATA;
2916 case WRITE_LIFE_EXTREME:
2917 return CURSEG_COLD_DATA;
2918 default:
2919 return CURSEG_WARM_DATA;
2920 }
2921}
2922
0cdd3195
HL
2923/* This returns write hints for each segment type. These hints will be
2924 * passed down to the block layer. There are mapping tables which depend on
2925 * the mount option 'whint_mode'.
2926 *
2927 * 1) whint_mode=off. F2FS only passes down WRITE_LIFE_NOT_SET.
2928 *
2929 * 2) whint_mode=user-based. F2FS tries to pass down hints given by users.
2930 *
2931 * User F2FS Block
2932 * ---- ---- -----
2933 * META WRITE_LIFE_NOT_SET
2934 * HOT_NODE "
2935 * WARM_NODE "
2936 * COLD_NODE "
2937 * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
2938 * extension list " "
2939 *
2940 * -- buffered io
2941 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2942 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2943 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
2944 * WRITE_LIFE_NONE " "
2945 * WRITE_LIFE_MEDIUM " "
2946 * WRITE_LIFE_LONG " "
2947 *
2948 * -- direct io
2949 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2950 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2951 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
2952 * WRITE_LIFE_NONE " WRITE_LIFE_NONE
2953 * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
2954 * WRITE_LIFE_LONG " WRITE_LIFE_LONG
2955 *
f2e703f9
HL
2956 * 3) whint_mode=fs-based. F2FS passes down hints with its policy.
2957 *
2958 * User F2FS Block
2959 * ---- ---- -----
2960 * META WRITE_LIFE_MEDIUM;
2961 * HOT_NODE WRITE_LIFE_NOT_SET
2962 * WARM_NODE "
2963 * COLD_NODE WRITE_LIFE_NONE
2964 * ioctl(COLD) COLD_DATA WRITE_LIFE_EXTREME
2965 * extension list " "
2966 *
2967 * -- buffered io
2968 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2969 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2970 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_LONG
2971 * WRITE_LIFE_NONE " "
2972 * WRITE_LIFE_MEDIUM " "
2973 * WRITE_LIFE_LONG " "
2974 *
2975 * -- direct io
2976 * WRITE_LIFE_EXTREME COLD_DATA WRITE_LIFE_EXTREME
2977 * WRITE_LIFE_SHORT HOT_DATA WRITE_LIFE_SHORT
2978 * WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
2979 * WRITE_LIFE_NONE " WRITE_LIFE_NONE
2980 * WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
2981 * WRITE_LIFE_LONG " WRITE_LIFE_LONG
0cdd3195
HL
2982 */
2983
4d57b86d 2984enum rw_hint f2fs_io_type_to_rw_hint(struct f2fs_sb_info *sbi,
0cdd3195
HL
2985 enum page_type type, enum temp_type temp)
2986{
63189b78 2987 if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_USER) {
0cdd3195 2988 if (type == DATA) {
f2e703f9 2989 if (temp == WARM)
0cdd3195 2990 return WRITE_LIFE_NOT_SET;
f2e703f9
HL
2991 else if (temp == HOT)
2992 return WRITE_LIFE_SHORT;
2993 else if (temp == COLD)
2994 return WRITE_LIFE_EXTREME;
0cdd3195
HL
2995 } else {
2996 return WRITE_LIFE_NOT_SET;
2997 }
63189b78 2998 } else if (F2FS_OPTION(sbi).whint_mode == WHINT_MODE_FS) {
f2e703f9
HL
2999 if (type == DATA) {
3000 if (temp == WARM)
3001 return WRITE_LIFE_LONG;
3002 else if (temp == HOT)
3003 return WRITE_LIFE_SHORT;
3004 else if (temp == COLD)
3005 return WRITE_LIFE_EXTREME;
3006 } else if (type == NODE) {
3007 if (temp == WARM || temp == HOT)
3008 return WRITE_LIFE_NOT_SET;
3009 else if (temp == COLD)
3010 return WRITE_LIFE_NONE;
3011 } else if (type == META) {
3012 return WRITE_LIFE_MEDIUM;
3013 }
0cdd3195 3014 }
f2e703f9 3015 return WRITE_LIFE_NOT_SET;
0cdd3195
HL
3016}
3017
81377bd6 3018static int __get_segment_type_2(struct f2fs_io_info *fio)
351df4b2 3019{
81377bd6 3020 if (fio->type == DATA)
351df4b2
JK
3021 return CURSEG_HOT_DATA;
3022 else
3023 return CURSEG_HOT_NODE;
3024}
3025
81377bd6 3026static int __get_segment_type_4(struct f2fs_io_info *fio)
351df4b2 3027{
81377bd6
JK
3028 if (fio->type == DATA) {
3029 struct inode *inode = fio->page->mapping->host;
351df4b2
JK
3030
3031 if (S_ISDIR(inode->i_mode))
3032 return CURSEG_HOT_DATA;
3033 else
3034 return CURSEG_COLD_DATA;
3035 } else {
81377bd6 3036 if (IS_DNODE(fio->page) && is_cold_node(fio->page))
a344b9fd 3037 return CURSEG_WARM_NODE;
351df4b2
JK
3038 else
3039 return CURSEG_COLD_NODE;
3040 }
3041}
3042
81377bd6 3043static int __get_segment_type_6(struct f2fs_io_info *fio)
351df4b2 3044{
81377bd6
JK
3045 if (fio->type == DATA) {
3046 struct inode *inode = fio->page->mapping->host;
351df4b2 3047
4c8ff709
CY
3048 if (is_cold_data(fio->page) || file_is_cold(inode) ||
3049 f2fs_compressed_file(inode))
351df4b2 3050 return CURSEG_COLD_DATA;
b6a06cbb 3051 if (file_is_hot(inode) ||
b4c3ca8b 3052 is_inode_flag_set(inode, FI_HOT_DATA) ||
2079f115
CY
3053 f2fs_is_atomic_file(inode) ||
3054 f2fs_is_volatile_file(inode))
ef095d19 3055 return CURSEG_HOT_DATA;
4d57b86d 3056 return f2fs_rw_hint_to_seg_type(inode->i_write_hint);
351df4b2 3057 } else {
81377bd6
JK
3058 if (IS_DNODE(fio->page))
3059 return is_cold_node(fio->page) ? CURSEG_WARM_NODE :
351df4b2 3060 CURSEG_HOT_NODE;
ef095d19 3061 return CURSEG_COLD_NODE;
351df4b2
JK
3062 }
3063}
3064
81377bd6 3065static int __get_segment_type(struct f2fs_io_info *fio)
351df4b2 3066{
a912b54d
JK
3067 int type = 0;
3068
63189b78 3069 switch (F2FS_OPTION(fio->sbi).active_logs) {
351df4b2 3070 case 2:
a912b54d
JK
3071 type = __get_segment_type_2(fio);
3072 break;
351df4b2 3073 case 4:
a912b54d
JK
3074 type = __get_segment_type_4(fio);
3075 break;
3076 case 6:
3077 type = __get_segment_type_6(fio);
3078 break;
3079 default:
3080 f2fs_bug_on(fio->sbi, true);
351df4b2 3081 }
81377bd6 3082
a912b54d
JK
3083 if (IS_HOT(type))
3084 fio->temp = HOT;
3085 else if (IS_WARM(type))
3086 fio->temp = WARM;
3087 else
3088 fio->temp = COLD;
3089 return type;
351df4b2
JK
3090}
3091
4d57b86d 3092void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
bfad7c2d 3093 block_t old_blkaddr, block_t *new_blkaddr,
fb830fc5
CY
3094 struct f2fs_summary *sum, int type,
3095 struct f2fs_io_info *fio, bool add_list)
351df4b2
JK
3096{
3097 struct sit_info *sit_i = SIT_I(sbi);
6ae1be13 3098 struct curseg_info *curseg = CURSEG_I(sbi, type);
f5a53edc
JK
3099 bool put_pin_sem = false;
3100
3101 if (type == CURSEG_COLD_DATA) {
3102 /* GC during CURSEG_COLD_DATA_PINNED allocation */
3103 if (down_read_trylock(&sbi->pin_sem)) {
3104 put_pin_sem = true;
3105 } else {
3106 type = CURSEG_WARM_DATA;
3107 curseg = CURSEG_I(sbi, type);
3108 }
3109 } else if (type == CURSEG_COLD_DATA_PINNED) {
3110 type = CURSEG_COLD_DATA;
3111 }
351df4b2 3112
2b60311d
CY
3113 down_read(&SM_I(sbi)->curseg_lock);
3114
351df4b2 3115 mutex_lock(&curseg->curseg_mutex);
3d26fa6b 3116 down_write(&sit_i->sentry_lock);
351df4b2
JK
3117
3118 *new_blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);
351df4b2 3119
4e6a8d9b
JK
3120 f2fs_wait_discard_bio(sbi, *new_blkaddr);
3121
351df4b2
JK
3122 /*
3123 * __add_sum_entry should be resided under the curseg_mutex
3124 * because, this function updates a summary entry in the
3125 * current summary block.
3126 */
e79efe3b 3127 __add_sum_entry(sbi, type, sum);
351df4b2 3128
351df4b2 3129 __refresh_next_blkoff(sbi, curseg);
dcdfff65
JK
3130
3131 stat_inc_block_count(sbi, curseg);
351df4b2 3132
65f1b80b
YS
3133 /*
3134 * SIT information should be updated before segment allocation,
3135 * since SSR needs the latest valid block information.
3136 */
3137 update_sit_entry(sbi, *new_blkaddr, 1);
3138 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
3139 update_sit_entry(sbi, old_blkaddr, -1);
3140
c6f82fe9
JK
3141 if (!__has_curseg_space(sbi, type))
3142 sit_i->s_ops->allocate_segment(sbi, type, false);
65f1b80b 3143
351df4b2 3144 /*
65f1b80b
YS
3145 * segment dirty status should be updated after segment allocation,
3146 * so we just need to update the status once, after the previous
3147 * segment is closed.
351df4b2 3148 */
65f1b80b
YS
3149 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3150 locate_dirty_segment(sbi, GET_SEGNO(sbi, *new_blkaddr));
5e443818 3151
3d26fa6b 3152 up_write(&sit_i->sentry_lock);
351df4b2 3153
704956ec 3154 if (page && IS_NODESEG(type)) {
351df4b2
JK
3155 fill_node_footer_blkaddr(page, NEXT_FREE_BLKADDR(sbi, curseg));
3156
704956ec
CY
3157 f2fs_inode_chksum_set(sbi, page);
3158 }
3159
8223ecc4
CY
3160 if (F2FS_IO_ALIGNED(sbi))
3161 fio->retry = false;
3162
fb830fc5
CY
3163 if (add_list) {
3164 struct f2fs_bio_info *io;
3165
3166 INIT_LIST_HEAD(&fio->list);
3167 fio->in_list = true;
3168 io = sbi->write_io[fio->type] + fio->temp;
3169 spin_lock(&io->io_lock);
3170 list_add_tail(&fio->list, &io->io_list);
3171 spin_unlock(&io->io_lock);
3172 }
3173
bfad7c2d 3174 mutex_unlock(&curseg->curseg_mutex);
2b60311d
CY
3175
3176 up_read(&SM_I(sbi)->curseg_lock);
f5a53edc
JK
3177
3178 if (put_pin_sem)
3179 up_read(&sbi->pin_sem);
bfad7c2d
JK
3180}
3181
39d787be
CY
3182static void update_device_state(struct f2fs_io_info *fio)
3183{
3184 struct f2fs_sb_info *sbi = fio->sbi;
3185 unsigned int devidx;
3186
0916878d 3187 if (!f2fs_is_multi_device(sbi))
39d787be
CY
3188 return;
3189
3190 devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
3191
3192 /* update device state for fsync */
4d57b86d 3193 f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
1228b482
CY
3194
3195 /* update device state for checkpoint */
3196 if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
3197 spin_lock(&sbi->dev_lock);
3198 f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
3199 spin_unlock(&sbi->dev_lock);
3200 }
39d787be
CY
3201}
3202
05ca3632 3203static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
bfad7c2d 3204{
81377bd6 3205 int type = __get_segment_type(fio);
b0332a0f 3206 bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA);
bfad7c2d 3207
107a805d
CY
3208 if (keep_order)
3209 down_read(&fio->sbi->io_order_lock);
0a595eba 3210reallocate:
4d57b86d 3211 f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
fb830fc5 3212 &fio->new_blkaddr, sum, type, fio, true);
6aa58d8a
CY
3213 if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
3214 invalidate_mapping_pages(META_MAPPING(fio->sbi),
3215 fio->old_blkaddr, fio->old_blkaddr);
bfad7c2d 3216
351df4b2 3217 /* write out dirty page into bdev */
fe16efe6
CY
3218 f2fs_submit_page_write(fio);
3219 if (fio->retry) {
0a595eba
JK
3220 fio->old_blkaddr = fio->new_blkaddr;
3221 goto reallocate;
3222 }
fe16efe6
CY
3223
3224 update_device_state(fio);
3225
107a805d
CY
3226 if (keep_order)
3227 up_read(&fio->sbi->io_order_lock);
351df4b2
JK
3228}
3229
4d57b86d 3230void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page,
b0af6d49 3231 enum iostat_type io_type)
351df4b2 3232{
458e6197 3233 struct f2fs_io_info fio = {
05ca3632 3234 .sbi = sbi,
458e6197 3235 .type = META,
0cdd3195 3236 .temp = HOT,
04d328de 3237 .op = REQ_OP_WRITE,
70fd7614 3238 .op_flags = REQ_SYNC | REQ_META | REQ_PRIO,
7a9d7548
CY
3239 .old_blkaddr = page->index,
3240 .new_blkaddr = page->index,
05ca3632 3241 .page = page,
4375a336 3242 .encrypted_page = NULL,
fb830fc5 3243 .in_list = false,
458e6197
JK
3244 };
3245
2b947003 3246 if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
04d328de 3247 fio.op_flags &= ~REQ_META;
2b947003 3248
351df4b2 3249 set_page_writeback(page);
17c50035 3250 ClearPageError(page);
b9109b0e 3251 f2fs_submit_page_write(&fio);
b0af6d49 3252
b63e7be5 3253 stat_inc_meta_count(sbi, page->index);
b0af6d49 3254 f2fs_update_iostat(sbi, io_type, F2FS_BLKSIZE);
351df4b2
JK
3255}
3256
4d57b86d 3257void f2fs_do_write_node_page(unsigned int nid, struct f2fs_io_info *fio)
351df4b2
JK
3258{
3259 struct f2fs_summary sum;
05ca3632 3260
351df4b2 3261 set_summary(&sum, nid, 0, 0);
05ca3632 3262 do_write_page(&sum, fio);
b0af6d49
CY
3263
3264 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
351df4b2
JK
3265}
3266
4d57b86d
CY
3267void f2fs_outplace_write_data(struct dnode_of_data *dn,
3268 struct f2fs_io_info *fio)
351df4b2 3269{
05ca3632 3270 struct f2fs_sb_info *sbi = fio->sbi;
351df4b2 3271 struct f2fs_summary sum;
351df4b2 3272
9850cf4a 3273 f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
7735730d 3274 set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
05ca3632 3275 do_write_page(&sum, fio);
f28b3434 3276 f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
b0af6d49
CY
3277
3278 f2fs_update_iostat(sbi, fio->io_type, F2FS_BLKSIZE);
351df4b2
JK
3279}
3280
4d57b86d 3281int f2fs_inplace_write_data(struct f2fs_io_info *fio)
351df4b2 3282{
b0af6d49 3283 int err;
d21b0f23 3284 struct f2fs_sb_info *sbi = fio->sbi;
05573d6c 3285 unsigned int segno;
b0af6d49 3286
7a9d7548 3287 fio->new_blkaddr = fio->old_blkaddr;
0cdd3195
HL
3288 /* i/o temperature is needed for passing down write hints */
3289 __get_segment_type(fio);
d21b0f23 3290
05573d6c
CY
3291 segno = GET_SEGNO(sbi, fio->new_blkaddr);
3292
3293 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3294 set_sbi_flag(sbi, SBI_NEED_FSCK);
2d821c12
CY
3295 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3296 __func__, segno);
10f966bb 3297 return -EFSCORRUPTED;
05573d6c 3298 }
d21b0f23 3299
05ca3632 3300 stat_inc_inplace_blocks(fio->sbi);
b0af6d49 3301
0e7f4197 3302 if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
8648de2c
CY
3303 err = f2fs_merge_page_bio(fio);
3304 else
3305 err = f2fs_submit_page_bio(fio);
e46f6bd8 3306 if (!err) {
39d787be 3307 update_device_state(fio);
e46f6bd8
CY
3308 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
3309 }
b0af6d49
CY
3310
3311 return err;
351df4b2
JK
3312}
3313
2b60311d
CY
3314static inline int __f2fs_get_curseg(struct f2fs_sb_info *sbi,
3315 unsigned int segno)
3316{
3317 int i;
3318
3319 for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
3320 if (CURSEG_I(sbi, i)->segno == segno)
3321 break;
3322 }
3323 return i;
3324}
3325
4d57b86d 3326void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
19f106bc 3327 block_t old_blkaddr, block_t new_blkaddr,
28bc106b 3328 bool recover_curseg, bool recover_newaddr)
351df4b2
JK
3329{
3330 struct sit_info *sit_i = SIT_I(sbi);
3331 struct curseg_info *curseg;
3332 unsigned int segno, old_cursegno;
3333 struct seg_entry *se;
3334 int type;
19f106bc 3335 unsigned short old_blkoff;
351df4b2
JK
3336
3337 segno = GET_SEGNO(sbi, new_blkaddr);
3338 se = get_seg_entry(sbi, segno);
3339 type = se->type;
3340
2b60311d
CY
3341 down_write(&SM_I(sbi)->curseg_lock);
3342
19f106bc
CY
3343 if (!recover_curseg) {
3344 /* for recovery flow */
3345 if (se->valid_blocks == 0 && !IS_CURSEG(sbi, segno)) {
3346 if (old_blkaddr == NULL_ADDR)
3347 type = CURSEG_COLD_DATA;
3348 else
3349 type = CURSEG_WARM_DATA;
3350 }
3351 } else {
2b60311d
CY
3352 if (IS_CURSEG(sbi, segno)) {
3353 /* se->type is volatile as SSR allocation */
3354 type = __f2fs_get_curseg(sbi, segno);
3355 f2fs_bug_on(sbi, type == NO_CHECK_TYPE);
3356 } else {
351df4b2 3357 type = CURSEG_WARM_DATA;
2b60311d 3358 }
351df4b2 3359 }
19f106bc 3360
2c190504 3361 f2fs_bug_on(sbi, !IS_DATASEG(type));
351df4b2
JK
3362 curseg = CURSEG_I(sbi, type);
3363
3364 mutex_lock(&curseg->curseg_mutex);
3d26fa6b 3365 down_write(&sit_i->sentry_lock);
351df4b2
JK
3366
3367 old_cursegno = curseg->segno;
19f106bc 3368 old_blkoff = curseg->next_blkoff;
351df4b2
JK
3369
3370 /* change the current segment */
3371 if (segno != curseg->segno) {
3372 curseg->next_segno = segno;
025d63a4 3373 change_curseg(sbi, type);
351df4b2
JK
3374 }
3375
491c0854 3376 curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr);
e79efe3b 3377 __add_sum_entry(sbi, type, sum);
351df4b2 3378
28bc106b 3379 if (!recover_curseg || recover_newaddr)
6e2c64ad 3380 update_sit_entry(sbi, new_blkaddr, 1);
6aa58d8a
CY
3381 if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
3382 invalidate_mapping_pages(META_MAPPING(sbi),
3383 old_blkaddr, old_blkaddr);
6e2c64ad 3384 update_sit_entry(sbi, old_blkaddr, -1);
6aa58d8a 3385 }
6e2c64ad
JK
3386
3387 locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
3388 locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
3389
351df4b2 3390 locate_dirty_segment(sbi, old_cursegno);
351df4b2 3391
19f106bc
CY
3392 if (recover_curseg) {
3393 if (old_cursegno != curseg->segno) {
3394 curseg->next_segno = old_cursegno;
025d63a4 3395 change_curseg(sbi, type);
19f106bc
CY
3396 }
3397 curseg->next_blkoff = old_blkoff;
3398 }
3399
3d26fa6b 3400 up_write(&sit_i->sentry_lock);
351df4b2 3401 mutex_unlock(&curseg->curseg_mutex);
2b60311d 3402 up_write(&SM_I(sbi)->curseg_lock);
351df4b2
JK
3403}
3404
528e3459
CY
3405void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn,
3406 block_t old_addr, block_t new_addr,
28bc106b
CY
3407 unsigned char version, bool recover_curseg,
3408 bool recover_newaddr)
528e3459
CY
3409{
3410 struct f2fs_summary sum;
3411
3412 set_summary(&sum, dn->nid, dn->ofs_in_node, version);
3413
4d57b86d 3414 f2fs_do_replace_block(sbi, &sum, old_addr, new_addr,
28bc106b 3415 recover_curseg, recover_newaddr);
528e3459 3416
f28b3434 3417 f2fs_update_data_blkaddr(dn, new_addr);
528e3459
CY
3418}
3419
93dfe2ac 3420void f2fs_wait_on_page_writeback(struct page *page,
bae0ee7a 3421 enum page_type type, bool ordered, bool locked)
93dfe2ac 3422{
93dfe2ac 3423 if (PageWriteback(page)) {
4081363f
JK
3424 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
3425
0b20fcec 3426 /* submit cached LFS IO */
bab475c5 3427 f2fs_submit_merged_write_cond(sbi, NULL, page, 0, type);
0b20fcec
CY
3428 /* submit cached IPU IO */
3429 f2fs_submit_merged_ipu_write(sbi, NULL, page);
bae0ee7a 3430 if (ordered) {
fec1d657 3431 wait_on_page_writeback(page);
bae0ee7a
CY
3432 f2fs_bug_on(sbi, locked && PageWriteback(page));
3433 } else {
fec1d657 3434 wait_for_stable_page(page);
bae0ee7a 3435 }
93dfe2ac
JK
3436 }
3437}
3438
0ded69f6 3439void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr)
08b39fbd 3440{
0ded69f6 3441 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
08b39fbd
CY
3442 struct page *cpage;
3443
0ded69f6
JK
3444 if (!f2fs_post_read_required(inode))
3445 return;
3446
93770ab7 3447 if (!__is_valid_data_blkaddr(blkaddr))
08b39fbd
CY
3448 return;
3449
08b39fbd
CY
3450 cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
3451 if (cpage) {
bae0ee7a 3452 f2fs_wait_on_page_writeback(cpage, DATA, true, true);
08b39fbd
CY
3453 f2fs_put_page(cpage, 1);
3454 }
3455}
3456
1e78e8bd
ST
3457void f2fs_wait_on_block_writeback_range(struct inode *inode, block_t blkaddr,
3458 block_t len)
3459{
3460 block_t i;
3461
3462 for (i = 0; i < len; i++)
3463 f2fs_wait_on_block_writeback(inode, blkaddr + i);
3464}
3465
7735730d 3466static int read_compacted_summaries(struct f2fs_sb_info *sbi)
351df4b2
JK
3467{
3468 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3469 struct curseg_info *seg_i;
3470 unsigned char *kaddr;
3471 struct page *page;
3472 block_t start;
3473 int i, j, offset;
3474
3475 start = start_sum_block(sbi);
3476
4d57b86d 3477 page = f2fs_get_meta_page(sbi, start++);
7735730d
CY
3478 if (IS_ERR(page))
3479 return PTR_ERR(page);
351df4b2
JK
3480 kaddr = (unsigned char *)page_address(page);
3481
3482 /* Step 1: restore nat cache */
3483 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
b7ad7512 3484 memcpy(seg_i->journal, kaddr, SUM_JOURNAL_SIZE);
351df4b2
JK
3485
3486 /* Step 2: restore sit cache */
3487 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
b7ad7512 3488 memcpy(seg_i->journal, kaddr + SUM_JOURNAL_SIZE, SUM_JOURNAL_SIZE);
351df4b2
JK
3489 offset = 2 * SUM_JOURNAL_SIZE;
3490
3491 /* Step 3: restore summary entries */
3492 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3493 unsigned short blk_off;
3494 unsigned int segno;
3495
3496 seg_i = CURSEG_I(sbi, i);
3497 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3498 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3499 seg_i->next_segno = segno;
3500 reset_curseg(sbi, i, 0);
3501 seg_i->alloc_type = ckpt->alloc_type[i];
3502 seg_i->next_blkoff = blk_off;
3503
3504 if (seg_i->alloc_type == SSR)
3505 blk_off = sbi->blocks_per_seg;
3506
3507 for (j = 0; j < blk_off; j++) {
3508 struct f2fs_summary *s;
3509 s = (struct f2fs_summary *)(kaddr + offset);
3510 seg_i->sum_blk->entries[j] = *s;
3511 offset += SUMMARY_SIZE;
09cbfeaf 3512 if (offset + SUMMARY_SIZE <= PAGE_SIZE -
351df4b2
JK
3513 SUM_FOOTER_SIZE)
3514 continue;
3515
3516 f2fs_put_page(page, 1);
3517 page = NULL;
3518
4d57b86d 3519 page = f2fs_get_meta_page(sbi, start++);
7735730d
CY
3520 if (IS_ERR(page))
3521 return PTR_ERR(page);
351df4b2
JK
3522 kaddr = (unsigned char *)page_address(page);
3523 offset = 0;
3524 }
3525 }
3526 f2fs_put_page(page, 1);
7735730d 3527 return 0;
351df4b2
JK
3528}
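The compacted layout walked above packs two journal areas and then raw summary entries into each page, always leaving a footer-sized tail untouched. The stand-alone sketch below does the corresponding capacity arithmetic; all sizes are illustrative assumptions rather than the real on-disk constants.

/*
 * Back-of-envelope check of the compacted-summary page layout.
 * All sizes below are illustrative assumptions, not on-disk constants.
 */
#include <stdio.h>

#define PAGE_SZ		4096
#define JOURNAL_SZ	510	/* assumed journal area size */
#define SUMMARY_SZ	7	/* assumed per-entry size */
#define FOOTER_SZ	5	/* assumed footer size */

int main(void)
{
	int offset = 2 * JOURNAL_SZ;		/* nat + sit journal areas */
	int usable = PAGE_SZ - FOOTER_SZ;	/* footer tail stays free */
	int first_page = (usable - offset) / SUMMARY_SZ;
	int later_pages = usable / SUMMARY_SZ;

	printf("entries on the first page: %d\n", first_page);
	printf("entries on later pages:    %d\n", later_pages);
	return 0;
}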
3529
3530static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
3531{
3532 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
3533 struct f2fs_summary_block *sum;
3534 struct curseg_info *curseg;
3535 struct page *new;
3536 unsigned short blk_off;
3537 unsigned int segno = 0;
3538 block_t blk_addr = 0;
7735730d 3539 int err = 0;
351df4b2
JK
3540
3541 /* get segment number and block addr */
3542 if (IS_DATASEG(type)) {
3543 segno = le32_to_cpu(ckpt->cur_data_segno[type]);
3544 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[type -
3545 CURSEG_HOT_DATA]);
119ee914 3546 if (__exist_node_summaries(sbi))
351df4b2
JK
3547 blk_addr = sum_blk_addr(sbi, NR_CURSEG_TYPE, type);
3548 else
3549 blk_addr = sum_blk_addr(sbi, NR_CURSEG_DATA_TYPE, type);
3550 } else {
3551 segno = le32_to_cpu(ckpt->cur_node_segno[type -
3552 CURSEG_HOT_NODE]);
3553 blk_off = le16_to_cpu(ckpt->cur_node_blkoff[type -
3554 CURSEG_HOT_NODE]);
119ee914 3555 if (__exist_node_summaries(sbi))
351df4b2
JK
3556 blk_addr = sum_blk_addr(sbi, NR_CURSEG_NODE_TYPE,
3557 type - CURSEG_HOT_NODE);
3558 else
3559 blk_addr = GET_SUM_BLOCK(sbi, segno);
3560 }
3561
4d57b86d 3562 new = f2fs_get_meta_page(sbi, blk_addr);
7735730d
CY
3563 if (IS_ERR(new))
3564 return PTR_ERR(new);
351df4b2
JK
3565 sum = (struct f2fs_summary_block *)page_address(new);
3566
3567 if (IS_NODESEG(type)) {
119ee914 3568 if (__exist_node_summaries(sbi)) {
351df4b2
JK
3569 struct f2fs_summary *ns = &sum->entries[0];
3570 int i;
3571 for (i = 0; i < sbi->blocks_per_seg; i++, ns++) {
3572 ns->version = 0;
3573 ns->ofs_in_node = 0;
3574 }
3575 } else {
7735730d
CY
3576 err = f2fs_restore_node_summary(sbi, segno, sum);
3577 if (err)
3578 goto out;
351df4b2
JK
3579 }
3580 }
3581
3582 /* set the incomplete segment as the current segment */
3583 curseg = CURSEG_I(sbi, type);
3584 mutex_lock(&curseg->curseg_mutex);
b7ad7512
CY
3585
3586 /* update journal info */
3587 down_write(&curseg->journal_rwsem);
3588 memcpy(curseg->journal, &sum->journal, SUM_JOURNAL_SIZE);
3589 up_write(&curseg->journal_rwsem);
3590
3591 memcpy(curseg->sum_blk->entries, sum->entries, SUM_ENTRY_SIZE);
3592 memcpy(&curseg->sum_blk->footer, &sum->footer, SUM_FOOTER_SIZE);
351df4b2
JK
3593 curseg->next_segno = segno;
3594 reset_curseg(sbi, type, 0);
3595 curseg->alloc_type = ckpt->alloc_type[type];
3596 curseg->next_blkoff = blk_off;
3597 mutex_unlock(&curseg->curseg_mutex);
7735730d 3598out:
351df4b2 3599 f2fs_put_page(new, 1);
7735730d 3600 return err;
351df4b2
JK
3601}
3602
3603static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3604{
21d3f8e1
JQ
3605 struct f2fs_journal *sit_j = CURSEG_I(sbi, CURSEG_COLD_DATA)->journal;
3606 struct f2fs_journal *nat_j = CURSEG_I(sbi, CURSEG_HOT_DATA)->journal;
351df4b2 3607 int type = CURSEG_HOT_DATA;
e4fc5fbf 3608 int err;
351df4b2 3609
aaec2b1d 3610 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG)) {
4d57b86d 3611 int npages = f2fs_npages_for_summary_flush(sbi, true);
3fa06d7b
CY
3612
3613 if (npages >= 2)
4d57b86d 3614 f2fs_ra_meta_pages(sbi, start_sum_block(sbi), npages,
26879fb1 3615 META_CP, true);
3fa06d7b 3616
351df4b2 3617 /* restore for compacted data summary */
7735730d
CY
3618 err = read_compacted_summaries(sbi);
3619 if (err)
3620 return err;
351df4b2
JK
3621 type = CURSEG_HOT_NODE;
3622 }
3623
119ee914 3624 if (__exist_node_summaries(sbi))
4d57b86d 3625 f2fs_ra_meta_pages(sbi, sum_blk_addr(sbi, NR_CURSEG_TYPE, type),
26879fb1 3626 NR_CURSEG_TYPE - type, META_CP, true);
3fa06d7b 3627
e4fc5fbf
CY
3628 for (; type <= CURSEG_COLD_NODE; type++) {
3629 err = read_normal_summaries(sbi, type);
3630 if (err)
3631 return err;
3632 }
3633
21d3f8e1
JQ
3634 /* sanity check for summary blocks */
3635 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
9227d522 3636 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
dcbb4c10
JP
3637 f2fs_err(sbi, "invalid journal entries nats %u sits %u",
3638 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
21d3f8e1 3639 return -EINVAL;
9227d522 3640 }
21d3f8e1 3641
351df4b2
JK
3642 return 0;
3643}
3644
3645static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
3646{
3647 struct page *page;
3648 unsigned char *kaddr;
3649 struct f2fs_summary *summary;
3650 struct curseg_info *seg_i;
3651 int written_size = 0;
3652 int i, j;
3653
4d57b86d 3654 page = f2fs_grab_meta_page(sbi, blkaddr++);
351df4b2 3655 kaddr = (unsigned char *)page_address(page);
81114baa 3656 memset(kaddr, 0, PAGE_SIZE);
351df4b2
JK
3657
3658 /* Step 1: write nat cache */
3659 seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
b7ad7512 3660 memcpy(kaddr, seg_i->journal, SUM_JOURNAL_SIZE);
351df4b2
JK
3661 written_size += SUM_JOURNAL_SIZE;
3662
3663 /* Step 2: write sit cache */
3664 seg_i = CURSEG_I(sbi, CURSEG_COLD_DATA);
b7ad7512 3665 memcpy(kaddr + written_size, seg_i->journal, SUM_JOURNAL_SIZE);
351df4b2
JK
3666 written_size += SUM_JOURNAL_SIZE;
3667
351df4b2
JK
3668 /* Step 3: write summary entries */
3669 for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) {
3670 unsigned short blkoff;
3671 seg_i = CURSEG_I(sbi, i);
3672 if (sbi->ckpt->alloc_type[i] == SSR)
3673 blkoff = sbi->blocks_per_seg;
3674 else
3675 blkoff = curseg_blkoff(sbi, i);
3676
3677 for (j = 0; j < blkoff; j++) {
3678 if (!page) {
4d57b86d 3679 page = f2fs_grab_meta_page(sbi, blkaddr++);
351df4b2 3680 kaddr = (unsigned char *)page_address(page);
81114baa 3681 memset(kaddr, 0, PAGE_SIZE);
351df4b2
JK
3682 written_size = 0;
3683 }
3684 summary = (struct f2fs_summary *)(kaddr + written_size);
3685 *summary = seg_i->sum_blk->entries[j];
3686 written_size += SUMMARY_SIZE;
351df4b2 3687
09cbfeaf 3688 if (written_size + SUMMARY_SIZE <= PAGE_SIZE -
351df4b2
JK
3689 SUM_FOOTER_SIZE)
3690 continue;
3691
e8d61a74 3692 set_page_dirty(page);
351df4b2
JK
3693 f2fs_put_page(page, 1);
3694 page = NULL;
3695 }
3696 }
e8d61a74
CY
3697 if (page) {
3698 set_page_dirty(page);
351df4b2 3699 f2fs_put_page(page, 1);
e8d61a74 3700 }
351df4b2
JK
3701}
3702
3703static void write_normal_summaries(struct f2fs_sb_info *sbi,
3704 block_t blkaddr, int type)
3705{
3706 int i, end;
3707 if (IS_DATASEG(type))
3708 end = type + NR_CURSEG_DATA_TYPE;
3709 else
3710 end = type + NR_CURSEG_NODE_TYPE;
3711
b7ad7512
CY
3712 for (i = type; i < end; i++)
3713 write_current_sum_page(sbi, i, blkaddr + (i - type));
351df4b2
JK
3714}
3715
4d57b86d 3716void f2fs_write_data_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
351df4b2 3717{
aaec2b1d 3718 if (is_set_ckpt_flags(sbi, CP_COMPACT_SUM_FLAG))
351df4b2
JK
3719 write_compacted_summaries(sbi, start_blk);
3720 else
3721 write_normal_summaries(sbi, start_blk, CURSEG_HOT_DATA);
3722}
3723
4d57b86d 3724void f2fs_write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
351df4b2 3725{
119ee914 3726 write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
351df4b2
JK
3727}
3728
4d57b86d 3729int f2fs_lookup_journal_in_cursum(struct f2fs_journal *journal, int type,
351df4b2
JK
3730 unsigned int val, int alloc)
3731{
3732 int i;
3733
3734 if (type == NAT_JOURNAL) {
dfc08a12
CY
3735 for (i = 0; i < nats_in_cursum(journal); i++) {
3736 if (le32_to_cpu(nid_in_journal(journal, i)) == val)
351df4b2
JK
3737 return i;
3738 }
dfc08a12
CY
3739 if (alloc && __has_cursum_space(journal, 1, NAT_JOURNAL))
3740 return update_nats_in_cursum(journal, 1);
351df4b2 3741 } else if (type == SIT_JOURNAL) {
dfc08a12
CY
3742 for (i = 0; i < sits_in_cursum(journal); i++)
3743 if (le32_to_cpu(segno_in_journal(journal, i)) == val)
351df4b2 3744 return i;
dfc08a12
CY
3745 if (alloc && __has_cursum_space(journal, 1, SIT_JOURNAL))
3746 return update_sits_in_cursum(journal, 1);
351df4b2
JK
3747 }
3748 return -1;
3749}
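f2fs_lookup_journal_in_cursum() is a linear lookup over a small in-memory journal with optional slot allocation on a miss. Below is a minimal analogue of that contract (index on hit, fresh index if alloc is set and space remains, -1 otherwise), with hypothetical names and capacity.

/*
 * Minimal analogue of lookup-or-allocate in a fixed-size journal.
 * Names and capacity are hypothetical.
 */
#include <stdio.h>

#define JOURNAL_CAP 8

struct journal { unsigned int key[JOURNAL_CAP]; int used; };

static int lookup_journal(struct journal *j, unsigned int val, int alloc)
{
	int i;

	for (i = 0; i < j->used; i++)
		if (j->key[i] == val)
			return i;		/* hit: existing slot */

	if (alloc && j->used < JOURNAL_CAP) {
		j->key[j->used] = val;
		return j->used++;		/* miss: fresh slot */
	}
	return -1;				/* miss and no room */
}

int main(void)
{
	struct journal j = { .used = 0 };

	printf("%d\n", lookup_journal(&j, 10, 1));	/* 0: allocated */
	printf("%d\n", lookup_journal(&j, 10, 0));	/* 0: found */
	printf("%d\n", lookup_journal(&j, 99, 0));	/* -1: absent */
	return 0;
}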
3750
3751static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
3752 unsigned int segno)
3753{
7735730d 3754 return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
351df4b2
JK
3755}
3756
3757static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
3758 unsigned int start)
3759{
3760 struct sit_info *sit_i = SIT_I(sbi);
068c3cd8 3761 struct page *page;
351df4b2 3762 pgoff_t src_off, dst_off;
351df4b2
JK
3763
3764 src_off = current_sit_addr(sbi, start);
3765 dst_off = next_sit_addr(sbi, src_off);
3766
4d57b86d 3767 page = f2fs_grab_meta_page(sbi, dst_off);
068c3cd8 3768 seg_info_to_sit_page(sbi, page, start);
351df4b2 3769
068c3cd8 3770 set_page_dirty(page);
351df4b2
JK
3771 set_to_next_sit(sit_i, start);
3772
068c3cd8 3773 return page;
351df4b2
JK
3774}
3775
184a5cd2
CY
3776static struct sit_entry_set *grab_sit_entry_set(void)
3777{
3778 struct sit_entry_set *ses =
80c54505 3779 f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
184a5cd2
CY
3780
3781 ses->entry_cnt = 0;
3782 INIT_LIST_HEAD(&ses->set_list);
3783 return ses;
3784}
3785
3786static void release_sit_entry_set(struct sit_entry_set *ses)
3787{
3788 list_del(&ses->set_list);
3789 kmem_cache_free(sit_entry_set_slab, ses);
3790}
3791
3792static void adjust_sit_entry_set(struct sit_entry_set *ses,
3793 struct list_head *head)
3794{
3795 struct sit_entry_set *next = ses;
3796
3797 if (list_is_last(&ses->set_list, head))
3798 return;
3799
3800 list_for_each_entry_continue(next, head, set_list)
3801 if (ses->entry_cnt <= next->entry_cnt)
3802 break;
3803
3804 list_move_tail(&ses->set_list, &next->set_list);
3805}
3806
3807static void add_sit_entry(unsigned int segno, struct list_head *head)
3808{
3809 struct sit_entry_set *ses;
3810 unsigned int start_segno = START_SEGNO(segno);
3811
3812 list_for_each_entry(ses, head, set_list) {
3813 if (ses->start_segno == start_segno) {
3814 ses->entry_cnt++;
3815 adjust_sit_entry_set(ses, head);
3816 return;
3817 }
3818 }
3819
3820 ses = grab_sit_entry_set();
3821
3822 ses->start_segno = start_segno;
3823 ses->entry_cnt++;
3824 list_add(&ses->set_list, head);
3825}
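adjust_sit_entry_set() keeps the set list ordered by ascending entry_cnt, so after add_sit_entry() bumps a count, the set is slid past its smaller neighbours. Here is an array-based sketch of that re-sort step; it is a stand-in for the kernel's linked list, not the actual code.

/*
 * Sketch of keeping a set list sorted by entry count: after one set's
 * count grows, slide it right past sets with strictly fewer entries.
 */
#include <stdio.h>

static void bump_and_resort(int cnt[], int n, int idx)
{
	int v = ++cnt[idx];
	int j = idx;

	/* move right while the next set still has fewer entries */
	while (j + 1 < n && cnt[j + 1] < v) {
		cnt[j] = cnt[j + 1];
		j++;
	}
	cnt[j] = v;
}

int main(void)
{
	int cnt[] = { 1, 1, 2, 5 };
	int i;

	bump_and_resort(cnt, 4, 0);	/* first set grows to 2 */
	for (i = 0; i < 4; i++)
		printf("%d ", cnt[i]);	/* prints: 1 2 2 5 */
	printf("\n");
	return 0;
}

Keeping the list ordered this way appears intended to let the later flush visit small sets first, maximising the chance they still fit in the journal.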
3826
3827static void add_sits_in_set(struct f2fs_sb_info *sbi)
3828{
3829 struct f2fs_sm_info *sm_info = SM_I(sbi);
3830 struct list_head *set_list = &sm_info->sit_entry_set;
3831 unsigned long *bitmap = SIT_I(sbi)->dirty_sentries_bitmap;
184a5cd2
CY
3832 unsigned int segno;
3833
7cd8558b 3834 for_each_set_bit(segno, bitmap, MAIN_SEGS(sbi))
184a5cd2
CY
3835 add_sit_entry(segno, set_list);
3836}
3837
3838static void remove_sits_in_journal(struct f2fs_sb_info *sbi)
351df4b2
JK
3839{
3840 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
b7ad7512 3841 struct f2fs_journal *journal = curseg->journal;
351df4b2
JK
3842 int i;
3843
b7ad7512 3844 down_write(&curseg->journal_rwsem);
dfc08a12 3845 for (i = 0; i < sits_in_cursum(journal); i++) {
184a5cd2
CY
3846 unsigned int segno;
3847 bool dirtied;
3848
dfc08a12 3849 segno = le32_to_cpu(segno_in_journal(journal, i));
184a5cd2
CY
3850 dirtied = __mark_sit_entry_dirty(sbi, segno);
3851
3852 if (!dirtied)
3853 add_sit_entry(segno, &SM_I(sbi)->sit_entry_set);
351df4b2 3854 }
dfc08a12 3855 update_sits_in_cursum(journal, -i);
b7ad7512 3856 up_write(&curseg->journal_rwsem);
351df4b2
JK
3857}
3858
0a8165d7 3859/*
351df4b2
JK
3860 * CP calls this function, which flushes SIT entries including sit_journal,
3861 * and moves prefree segs to free segs.
3862 */
4d57b86d 3863void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
351df4b2
JK
3864{
3865 struct sit_info *sit_i = SIT_I(sbi);
3866 unsigned long *bitmap = sit_i->dirty_sentries_bitmap;
3867 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
b7ad7512 3868 struct f2fs_journal *journal = curseg->journal;
184a5cd2
CY
3869 struct sit_entry_set *ses, *tmp;
3870 struct list_head *head = &SM_I(sbi)->sit_entry_set;
04f0b2ea 3871 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
4b2fecc8 3872 struct seg_entry *se;
351df4b2 3873
3d26fa6b 3874 down_write(&sit_i->sentry_lock);
351df4b2 3875
2b11a74b
WL
3876 if (!sit_i->dirty_sentries)
3877 goto out;
3878
351df4b2 3879 /*
184a5cd2
CY
3880 * add and account the sit entries from the dirty bitmap in
3881 * sit entry sets temporarily
351df4b2 3882 */
184a5cd2 3883 add_sits_in_set(sbi);
351df4b2 3884
184a5cd2
CY
3885 /*
3886 * if there is not enough space in the journal to store dirty sit
3887 * entries, remove all entries from the journal and account them
3888 * in the sit entry set instead.
3889 */
04f0b2ea
QS
3890 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
3891 !to_journal)
184a5cd2 3892 remove_sits_in_journal(sbi);
b2955550 3893
184a5cd2
CY
3894 /*
3895 * there are two steps to flush sit entries:
3896 * #1, flush sit entries to journal in current cold data summary block.
3897 * #2, flush sit entries to sit page.
3898 */
3899 list_for_each_entry_safe(ses, tmp, head, set_list) {
4a257ed6 3900 struct page *page = NULL;
184a5cd2
CY
3901 struct f2fs_sit_block *raw_sit = NULL;
3902 unsigned int start_segno = ses->start_segno;
3903 unsigned int end = min(start_segno + SIT_ENTRY_PER_BLOCK,
7cd8558b 3904 (unsigned long)MAIN_SEGS(sbi));
184a5cd2
CY
3905 unsigned int segno = start_segno;
3906
3907 if (to_journal &&
dfc08a12 3908 !__has_cursum_space(journal, ses->entry_cnt, SIT_JOURNAL))
184a5cd2
CY
3909 to_journal = false;
3910
b7ad7512
CY
3911 if (to_journal) {
3912 down_write(&curseg->journal_rwsem);
3913 } else {
184a5cd2
CY
3914 page = get_next_sit_page(sbi, start_segno);
3915 raw_sit = page_address(page);
351df4b2 3916 }
351df4b2 3917
184a5cd2
CY
3918 /* flush dirty sit entries in region of current sit set */
3919 for_each_set_bit_from(segno, bitmap, end) {
3920 int offset, sit_offset;
4b2fecc8
JK
3921
3922 se = get_seg_entry(sbi, segno);
56b07e7e
ZZ
3923#ifdef CONFIG_F2FS_CHECK_FS
3924 if (memcmp(se->cur_valid_map, se->cur_valid_map_mir,
3925 SIT_VBLOCK_MAP_SIZE))
3926 f2fs_bug_on(sbi, 1);
3927#endif
184a5cd2
CY
3928
3929 /* add discard candidates */
c473f1a9 3930 if (!(cpc->reason & CP_DISCARD)) {
4b2fecc8 3931 cpc->trim_start = segno;
25290fa5 3932 add_discard_addrs(sbi, cpc, false);
4b2fecc8 3933 }
184a5cd2
CY
3934
3935 if (to_journal) {
4d57b86d 3936 offset = f2fs_lookup_journal_in_cursum(journal,
184a5cd2
CY
3937 SIT_JOURNAL, segno, 1);
3938 f2fs_bug_on(sbi, offset < 0);
dfc08a12 3939 segno_in_journal(journal, offset) =
184a5cd2
CY
3940 cpu_to_le32(segno);
3941 seg_info_to_raw_sit(se,
dfc08a12 3942 &sit_in_journal(journal, offset));
56b07e7e
ZZ
3943 check_block_count(sbi, segno,
3944 &sit_in_journal(journal, offset));
184a5cd2
CY
3945 } else {
3946 sit_offset = SIT_ENTRY_OFFSET(sit_i, segno);
3947 seg_info_to_raw_sit(se,
3948 &raw_sit->entries[sit_offset]);
56b07e7e
ZZ
3949 check_block_count(sbi, segno,
3950 &raw_sit->entries[sit_offset]);
184a5cd2 3951 }
351df4b2 3952
184a5cd2
CY
3953 __clear_bit(segno, bitmap);
3954 sit_i->dirty_sentries--;
3955 ses->entry_cnt--;
351df4b2
JK
3956 }
3957
b7ad7512
CY
3958 if (to_journal)
3959 up_write(&curseg->journal_rwsem);
3960 else
184a5cd2
CY
3961 f2fs_put_page(page, 1);
3962
3963 f2fs_bug_on(sbi, ses->entry_cnt);
3964 release_sit_entry_set(ses);
351df4b2 3965 }
184a5cd2
CY
3966
3967 f2fs_bug_on(sbi, !list_empty(head));
3968 f2fs_bug_on(sbi, sit_i->dirty_sentries);
184a5cd2 3969out:
c473f1a9 3970 if (cpc->reason & CP_DISCARD) {
650d3c4e
YH
3971 __u64 trim_start = cpc->trim_start;
3972
4b2fecc8 3973 for (; cpc->trim_start <= cpc->trim_end; cpc->trim_start++)
25290fa5 3974 add_discard_addrs(sbi, cpc, false);
650d3c4e
YH
3975
3976 cpc->trim_start = trim_start;
4b2fecc8 3977 }
3d26fa6b 3978 up_write(&sit_i->sentry_lock);
351df4b2 3979
351df4b2
JK
3980 set_prefree_as_free_segments(sbi);
3981}
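The flush above has two possible destinations: the SIT journal kept in the cold data summary when every dirty entry fits, or rewritten SIT pages otherwise. A toy version of just that decision, with an assumed journal capacity:

/*
 * Toy version of the flush-destination decision: journal if every
 * dirty entry fits, otherwise spill to SIT pages. Numbers are
 * illustrative assumptions.
 */
#include <stdio.h>

#define JOURNAL_SLOTS 6	/* assumed journal capacity */

static const char *flush_target(int journal_used, int dirty)
{
	if (journal_used + dirty <= JOURNAL_SLOTS)
		return "journal";	/* cheap: piggyback on summary */
	return "sit pages";		/* spill: rewrite SIT blocks */
}

int main(void)
{
	printf("%s\n", flush_target(2, 3));	/* journal */
	printf("%s\n", flush_target(2, 9));	/* sit pages */
	return 0;
}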
3982
3983static int build_sit_info(struct f2fs_sb_info *sbi)
3984{
3985 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
351df4b2
JK
3986 struct sit_info *sit_i;
3987 unsigned int sit_segs, start;
2fde3dd1 3988 char *src_bitmap, *bitmap;
bbf9f7d9 3989 unsigned int bitmap_size, main_bitmap_size, sit_bitmap_size;
351df4b2
JK
3990
3991 /* allocate memory for SIT information */
acbf054d 3992 sit_i = f2fs_kzalloc(sbi, sizeof(struct sit_info), GFP_KERNEL);
351df4b2
JK
3993 if (!sit_i)
3994 return -ENOMEM;
3995
3996 SM_I(sbi)->sit_info = sit_i;
3997
9d2a789c
KC
3998 sit_i->sentries =
3999 f2fs_kvzalloc(sbi, array_size(sizeof(struct seg_entry),
4000 MAIN_SEGS(sbi)),
4001 GFP_KERNEL);
351df4b2
JK
4002 if (!sit_i->sentries)
4003 return -ENOMEM;
4004
bbf9f7d9
ST
4005 main_bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
4006 sit_i->dirty_sentries_bitmap = f2fs_kvzalloc(sbi, main_bitmap_size,
628b3d14 4007 GFP_KERNEL);
351df4b2
JK
4008 if (!sit_i->dirty_sentries_bitmap)
4009 return -ENOMEM;
4010
2fde3dd1
CY
4011#ifdef CONFIG_F2FS_CHECK_FS
4012 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 4;
4013#else
4014 bitmap_size = MAIN_SEGS(sbi) * SIT_VBLOCK_MAP_SIZE * 3;
4015#endif
4016 sit_i->bitmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
4017 if (!sit_i->bitmap)
4018 return -ENOMEM;
4019
4020 bitmap = sit_i->bitmap;
4021
7cd8558b 4022 for (start = 0; start < MAIN_SEGS(sbi); start++) {
2fde3dd1
CY
4023 sit_i->sentries[start].cur_valid_map = bitmap;
4024 bitmap += SIT_VBLOCK_MAP_SIZE;
4025
4026 sit_i->sentries[start].ckpt_valid_map = bitmap;
4027 bitmap += SIT_VBLOCK_MAP_SIZE;
3e025740 4028
355e7891 4029#ifdef CONFIG_F2FS_CHECK_FS
2fde3dd1
CY
4030 sit_i->sentries[start].cur_valid_map_mir = bitmap;
4031 bitmap += SIT_VBLOCK_MAP_SIZE;
355e7891
CY
4032#endif
4033
2fde3dd1
CY
4034 sit_i->sentries[start].discard_map = bitmap;
4035 bitmap += SIT_VBLOCK_MAP_SIZE;
351df4b2
JK
4036 }
4037
acbf054d 4038 sit_i->tmp_map = f2fs_kzalloc(sbi, SIT_VBLOCK_MAP_SIZE, GFP_KERNEL);
60a3b782
JK
4039 if (!sit_i->tmp_map)
4040 return -ENOMEM;
4041
2c70c5e3 4042 if (__is_large_section(sbi)) {
9d2a789c
KC
4043 sit_i->sec_entries =
4044 f2fs_kvzalloc(sbi, array_size(sizeof(struct sec_entry),
4045 MAIN_SECS(sbi)),
4046 GFP_KERNEL);
351df4b2
JK
4047 if (!sit_i->sec_entries)
4048 return -ENOMEM;
4049 }
4050
4051 /* get information related with SIT */
4052 sit_segs = le32_to_cpu(raw_super->segment_count_sit) >> 1;
4053
4054 /* set up the SIT bitmap from the checkpoint pack */
bbf9f7d9 4055 sit_bitmap_size = __bitmap_size(sbi, SIT_BITMAP);
351df4b2
JK
4056 src_bitmap = __bitmap_ptr(sbi, SIT_BITMAP);
4057
bbf9f7d9 4058 sit_i->sit_bitmap = kmemdup(src_bitmap, sit_bitmap_size, GFP_KERNEL);
ae27d62e 4059 if (!sit_i->sit_bitmap)
351df4b2 4060 return -ENOMEM;
351df4b2 4061
ae27d62e 4062#ifdef CONFIG_F2FS_CHECK_FS
bbf9f7d9
ST
4063 sit_i->sit_bitmap_mir = kmemdup(src_bitmap,
4064 sit_bitmap_size, GFP_KERNEL);
ae27d62e
CY
4065 if (!sit_i->sit_bitmap_mir)
4066 return -ENOMEM;
bbf9f7d9
ST
4067
4068 sit_i->invalid_segmap = f2fs_kvzalloc(sbi,
4069 main_bitmap_size, GFP_KERNEL);
4070 if (!sit_i->invalid_segmap)
4071 return -ENOMEM;
ae27d62e
CY
4072#endif
4073
351df4b2
JK
4074 /* init SIT information */
4075 sit_i->s_ops = &default_salloc_ops;
4076
4077 sit_i->sit_base_addr = le32_to_cpu(raw_super->sit_blkaddr);
4078 sit_i->sit_blocks = sit_segs << sbi->log_blocks_per_seg;
c79b7ff1 4079 sit_i->written_valid_blocks = 0;
bbf9f7d9 4080 sit_i->bitmap_size = sit_bitmap_size;
351df4b2
JK
4081 sit_i->dirty_sentries = 0;
4082 sit_i->sents_per_block = SIT_ENTRY_PER_BLOCK;
4083 sit_i->elapsed_time = le64_to_cpu(sbi->ckpt->elapsed_time);
a7e679b5 4084 sit_i->mounted_time = ktime_get_boottime_seconds();
3d26fa6b 4085 init_rwsem(&sit_i->sentry_lock);
351df4b2
JK
4086 return 0;
4087}
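build_sit_info() makes one large allocation and carves it into consecutive per-segment bitmaps (current, checkpoint, optionally a mirror, and discard). A small sketch of the same carving, with assumed map sizes and counts:

/*
 * Sketch of carving one allocation into consecutive per-segment
 * bitmaps, as build_sit_info() does with sit_i->bitmap. Sizes and
 * counts are assumptions.
 */
#include <stdio.h>
#include <stdlib.h>

#define SEGS		4
#define MAP_BYTES	8	/* assumed per-map size */

int main(void)
{
	unsigned char *pool = calloc(SEGS * 3, MAP_BYTES);
	unsigned char *cur[SEGS], *ckpt[SEGS], *discard[SEGS];
	unsigned char *p = pool;
	int i;

	if (!pool)
		return 1;
	for (i = 0; i < SEGS; i++) {
		cur[i] = p;	p += MAP_BYTES;	/* "cur_valid_map" */
		ckpt[i] = p;	p += MAP_BYTES;	/* "ckpt_valid_map" */
		discard[i] = p;	p += MAP_BYTES;	/* "discard_map" */
	}
	printf("seg1 cur map at offset %td\n", cur[1] - pool);
	printf("seg3 ckpt map at offset %td\n", ckpt[3] - pool);
	printf("seg2 discard map at offset %td\n", discard[2] - pool);
	free(pool);
	return 0;
}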
4088
4089static int build_free_segmap(struct f2fs_sb_info *sbi)
4090{
351df4b2
JK
4091 struct free_segmap_info *free_i;
4092 unsigned int bitmap_size, sec_bitmap_size;
4093
4094 /* allocate memory for free segmap information */
acbf054d 4095 free_i = f2fs_kzalloc(sbi, sizeof(struct free_segmap_info), GFP_KERNEL);
351df4b2
JK
4096 if (!free_i)
4097 return -ENOMEM;
4098
4099 SM_I(sbi)->free_info = free_i;
4100
7cd8558b 4101 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
628b3d14 4102 free_i->free_segmap = f2fs_kvmalloc(sbi, bitmap_size, GFP_KERNEL);
351df4b2
JK
4103 if (!free_i->free_segmap)
4104 return -ENOMEM;
4105
7cd8558b 4106 sec_bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
628b3d14 4107 free_i->free_secmap = f2fs_kvmalloc(sbi, sec_bitmap_size, GFP_KERNEL);
351df4b2
JK
4108 if (!free_i->free_secmap)
4109 return -ENOMEM;
4110
4111 /* set all segments as dirty temporarily */
4112 memset(free_i->free_segmap, 0xff, bitmap_size);
4113 memset(free_i->free_secmap, 0xff, sec_bitmap_size);
4114
4115 /* init free segmap information */
7cd8558b 4116 free_i->start_segno = GET_SEGNO_FROM_SEG0(sbi, MAIN_BLKADDR(sbi));
351df4b2
JK
4117 free_i->free_segments = 0;
4118 free_i->free_sections = 0;
1a118ccf 4119 spin_lock_init(&free_i->segmap_lock);
351df4b2
JK
4120 return 0;
4121}
4122
4123static int build_curseg(struct f2fs_sb_info *sbi)
4124{
1042d60f 4125 struct curseg_info *array;
351df4b2
JK
4126 int i;
4127
026f0507
KC
4128 array = f2fs_kzalloc(sbi, array_size(NR_CURSEG_TYPE, sizeof(*array)),
4129 GFP_KERNEL);
351df4b2
JK
4130 if (!array)
4131 return -ENOMEM;
4132
4133 SM_I(sbi)->curseg_array = array;
4134
4135 for (i = 0; i < NR_CURSEG_TYPE; i++) {
4136 mutex_init(&array[i].curseg_mutex);
acbf054d 4137 array[i].sum_blk = f2fs_kzalloc(sbi, PAGE_SIZE, GFP_KERNEL);
351df4b2
JK
4138 if (!array[i].sum_blk)
4139 return -ENOMEM;
b7ad7512 4140 init_rwsem(&array[i].journal_rwsem);
acbf054d
CY
4141 array[i].journal = f2fs_kzalloc(sbi,
4142 sizeof(struct f2fs_journal), GFP_KERNEL);
b7ad7512
CY
4143 if (!array[i].journal)
4144 return -ENOMEM;
351df4b2
JK
4145 array[i].segno = NULL_SEGNO;
4146 array[i].next_blkoff = 0;
4147 }
4148 return restore_curseg_summaries(sbi);
4149}
4150
c39a1b34 4151static int build_sit_entries(struct f2fs_sb_info *sbi)
351df4b2
JK
4152{
4153 struct sit_info *sit_i = SIT_I(sbi);
4154 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
b7ad7512 4155 struct f2fs_journal *journal = curseg->journal;
9c094040
YH
4156 struct seg_entry *se;
4157 struct f2fs_sit_entry sit;
74de593a
CY
4158 int sit_blk_cnt = SIT_BLK_CNT(sbi);
4159 unsigned int i, start, end;
4160 unsigned int readed, start_blk = 0;
c39a1b34 4161 int err = 0;
8a29c126 4162 block_t total_node_blocks = 0;
351df4b2 4163
74de593a 4164 do {
4d57b86d 4165 readed = f2fs_ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
664ba972 4166 META_SIT, true);
74de593a
CY
4167
4168 start = start_blk * sit_i->sents_per_block;
4169 end = (start_blk + readed) * sit_i->sents_per_block;
4170
7cd8558b 4171 for (; start < end && start < MAIN_SEGS(sbi); start++) {
74de593a 4172 struct f2fs_sit_block *sit_blk;
74de593a
CY
4173 struct page *page;
4174
9c094040 4175 se = &sit_i->sentries[start];
74de593a 4176 page = get_current_sit_page(sbi, start);
edc55aaf
JK
4177 if (IS_ERR(page))
4178 return PTR_ERR(page);
74de593a
CY
4179 sit_blk = (struct f2fs_sit_block *)page_address(page);
4180 sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
4181 f2fs_put_page(page, 1);
d600af23 4182
c39a1b34
JK
4183 err = check_block_count(sbi, start, &sit);
4184 if (err)
4185 return err;
74de593a 4186 seg_info_from_raw_sit(se, &sit);
8a29c126
JK
4187 if (IS_NODESEG(se->type))
4188 total_node_blocks += se->valid_blocks;
a66cdd98
JK
4189
4190 /* build discard map only one time */
7d20c8ab
CY
4191 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4192 memset(se->discard_map, 0xff,
4193 SIT_VBLOCK_MAP_SIZE);
4194 } else {
4195 memcpy(se->discard_map,
4196 se->cur_valid_map,
4197 SIT_VBLOCK_MAP_SIZE);
4198 sbi->discard_blks +=
4199 sbi->blocks_per_seg -
4200 se->valid_blocks;
3e025740 4201 }
a66cdd98 4202
2c70c5e3 4203 if (__is_large_section(sbi))
d600af23
CY
4204 get_sec_entry(sbi, start)->valid_blocks +=
4205 se->valid_blocks;
351df4b2 4206 }
74de593a
CY
4207 start_blk += readed;
4208 } while (start_blk < sit_blk_cnt);
d600af23
CY
4209
4210 down_read(&curseg->journal_rwsem);
4211 for (i = 0; i < sits_in_cursum(journal); i++) {
d600af23
CY
4212 unsigned int old_valid_blocks;
4213
4214 start = le32_to_cpu(segno_in_journal(journal, i));
b2ca374f 4215 if (start >= MAIN_SEGS(sbi)) {
dcbb4c10
JP
4216 f2fs_err(sbi, "Wrong journal entry on segno %u",
4217 start);
10f966bb 4218 err = -EFSCORRUPTED;
b2ca374f
JK
4219 break;
4220 }
4221
d600af23
CY
4222 se = &sit_i->sentries[start];
4223 sit = sit_in_journal(journal, i);
4224
4225 old_valid_blocks = se->valid_blocks;
8a29c126
JK
4226 if (IS_NODESEG(se->type))
4227 total_node_blocks -= old_valid_blocks;
d600af23 4228
c39a1b34
JK
4229 err = check_block_count(sbi, start, &sit);
4230 if (err)
4231 break;
d600af23 4232 seg_info_from_raw_sit(se, &sit);
8a29c126
JK
4233 if (IS_NODESEG(se->type))
4234 total_node_blocks += se->valid_blocks;
d600af23 4235
7d20c8ab
CY
4236 if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
4237 memset(se->discard_map, 0xff, SIT_VBLOCK_MAP_SIZE);
4238 } else {
4239 memcpy(se->discard_map, se->cur_valid_map,
4240 SIT_VBLOCK_MAP_SIZE);
4241 sbi->discard_blks += old_valid_blocks;
4242 sbi->discard_blks -= se->valid_blocks;
d600af23
CY
4243 }
4244
2c70c5e3 4245 if (__is_large_section(sbi)) {
d600af23 4246 get_sec_entry(sbi, start)->valid_blocks +=
a9af3fdc
CY
4247 se->valid_blocks;
4248 get_sec_entry(sbi, start)->valid_blocks -=
4249 old_valid_blocks;
4250 }
d600af23
CY
4251 }
4252 up_read(&curseg->journal_rwsem);
8a29c126
JK
4253
4254 if (!err && total_node_blocks != valid_node_count(sbi)) {
dcbb4c10
JP
4255 f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4256 total_node_blocks, valid_node_count(sbi));
10f966bb 4257 err = -EFSCORRUPTED;
8a29c126
JK
4258 }
4259
c39a1b34 4260 return err;
351df4b2
JK
4261}
4262
4263static void init_free_segmap(struct f2fs_sb_info *sbi)
4264{
4265 unsigned int start;
4266 int type;
4267
7cd8558b 4268 for (start = 0; start < MAIN_SEGS(sbi); start++) {
351df4b2
JK
4269 struct seg_entry *sentry = get_seg_entry(sbi, start);
4270 if (!sentry->valid_blocks)
4271 __set_free(sbi, start);
c79b7ff1
JK
4272 else
4273 SIT_I(sbi)->written_valid_blocks +=
4274 sentry->valid_blocks;
351df4b2
JK
4275 }
4276
4277 /* mark the current segments as in use */
4278 for (type = CURSEG_HOT_DATA; type <= CURSEG_COLD_NODE; type++) {
4279 struct curseg_info *curseg_t = CURSEG_I(sbi, type);
4280 __set_test_and_inuse(sbi, curseg_t->segno);
4281 }
4282}
4283
4284static void init_dirty_segmap(struct f2fs_sb_info *sbi)
4285{
4286 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4287 struct free_segmap_info *free_i = FREE_I(sbi);
7cd8558b 4288 unsigned int segno = 0, offset = 0;
351df4b2
JK
4289 unsigned short valid_blocks;
4290
8736fbf0 4291 while (1) {
351df4b2 4292 /* find dirty segment based on free segmap */
7cd8558b
JK
4293 segno = find_next_inuse(free_i, MAIN_SEGS(sbi), offset);
4294 if (segno >= MAIN_SEGS(sbi))
351df4b2
JK
4295 break;
4296 offset = segno + 1;
302bd348 4297 valid_blocks = get_valid_blocks(sbi, segno, false);
ec325b52 4298 if (valid_blocks == sbi->blocks_per_seg || !valid_blocks)
351df4b2 4299 continue;
ec325b52
JK
4300 if (valid_blocks > sbi->blocks_per_seg) {
4301 f2fs_bug_on(sbi, 1);
4302 continue;
4303 }
351df4b2
JK
4304 mutex_lock(&dirty_i->seglist_lock);
4305 __locate_dirty_segment(sbi, segno, DIRTY);
4306 mutex_unlock(&dirty_i->seglist_lock);
4307 }
4308}
4309
5ec4e49f 4310static int init_victim_secmap(struct f2fs_sb_info *sbi)
351df4b2
JK
4311{
4312 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
7cd8558b 4313 unsigned int bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi));
351df4b2 4314
628b3d14 4315 dirty_i->victim_secmap = f2fs_kvzalloc(sbi, bitmap_size, GFP_KERNEL);
5ec4e49f 4316 if (!dirty_i->victim_secmap)
351df4b2
JK
4317 return -ENOMEM;
4318 return 0;
4319}
4320
4321static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4322{
4323 struct dirty_seglist_info *dirty_i;
4324 unsigned int bitmap_size, i;
4325
4326 /* allocate memory for dirty segments list information */
acbf054d
CY
4327 dirty_i = f2fs_kzalloc(sbi, sizeof(struct dirty_seglist_info),
4328 GFP_KERNEL);
351df4b2
JK
4329 if (!dirty_i)
4330 return -ENOMEM;
4331
4332 SM_I(sbi)->dirty_info = dirty_i;
4333 mutex_init(&dirty_i->seglist_lock);
4334
7cd8558b 4335 bitmap_size = f2fs_bitmap_size(MAIN_SEGS(sbi));
351df4b2
JK
4336
4337 for (i = 0; i < NR_DIRTY_TYPE; i++) {
628b3d14
CY
4338 dirty_i->dirty_segmap[i] = f2fs_kvzalloc(sbi, bitmap_size,
4339 GFP_KERNEL);
351df4b2
JK
4340 if (!dirty_i->dirty_segmap[i])
4341 return -ENOMEM;
4342 }
4343
4344 init_dirty_segmap(sbi);
5ec4e49f 4345 return init_victim_secmap(sbi);
351df4b2
JK
4346}
4347
c854f4d6
CY
4348static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4349{
4350 int i;
4351
4352 /*
4353 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4354 * In LFS curseg, all blkaddrs after .next_blkoff should be unused.
4355 */
4356 for (i = 0; i < NO_CHECK_TYPE; i++) {
4357 struct curseg_info *curseg = CURSEG_I(sbi, i);
4358 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4359 unsigned int blkofs = curseg->next_blkoff;
4360
4361 if (f2fs_test_bit(blkofs, se->cur_valid_map))
4362 goto out;
4363
4364 if (curseg->alloc_type == SSR)
4365 continue;
4366
4367 for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4368 if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4369 continue;
4370out:
dcbb4c10
JP
4371 f2fs_err(sbi,
4372 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4373 i, curseg->segno, curseg->alloc_type,
4374 curseg->next_blkoff, blkofs);
10f966bb 4375 return -EFSCORRUPTED;
c854f4d6
CY
4376 }
4377 }
4378 return 0;
4379}
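The invariant enforced here is that next_blkoff must point at an unused block and, for LFS-style logs, that every later block in the segment is unused as well. Below is a toy version using a plain LSB-first bitmask instead of the kernel's byte-reversed bitmap.

/*
 * Toy check of the curseg invariant: next_blkoff must be unused and,
 * for LFS-style logs, so must everything after it.
 */
#include <stdio.h>

static int curseg_consistent(unsigned int valid_map, int next_blkoff,
			     int blocks_per_seg, int is_lfs)
{
	int b;

	if (valid_map & (1u << next_blkoff))
		return 0;		/* next slot already valid */
	if (!is_lfs)
		return 1;		/* SSR may leave holes */
	for (b = next_blkoff + 1; b < blocks_per_seg; b++)
		if (valid_map & (1u << b))
			return 0;	/* LFS tail must be clean */
	return 1;
}

int main(void)
{
	/* blocks 0-2 valid, cursor at 3, 8-block segment */
	printf("%d\n", curseg_consistent(0x07, 3, 8, 1));	/* 1 */
	printf("%d\n", curseg_consistent(0x17, 3, 8, 1));	/* 0 */
	return 0;
}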
4380
c426d991
SK
4381#ifdef CONFIG_BLK_DEV_ZONED
4382
d508c94e
SK
4383static int check_zone_write_pointer(struct f2fs_sb_info *sbi,
4384 struct f2fs_dev_info *fdev,
4385 struct blk_zone *zone)
4386{
4387 unsigned int wp_segno, wp_blkoff, zone_secno, zone_segno, segno;
4388 block_t zone_block, wp_block, last_valid_block;
4389 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4390 int i, s, b, ret;
4391 struct seg_entry *se;
4392
4393 if (zone->type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4394 return 0;
4395
4396 wp_block = fdev->start_blk + (zone->wp >> log_sectors_per_block);
4397 wp_segno = GET_SEGNO(sbi, wp_block);
4398 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4399 zone_block = fdev->start_blk + (zone->start >> log_sectors_per_block);
4400 zone_segno = GET_SEGNO(sbi, zone_block);
4401 zone_secno = GET_SEC_FROM_SEG(sbi, zone_segno);
4402
4403 if (zone_segno >= MAIN_SEGS(sbi))
4404 return 0;
4405
4406 /*
4407 * Skip checking the zones that cursegs point to, since
4408 * fix_curseg_write_pointer() checks them.
4409 */
4410 for (i = 0; i < NO_CHECK_TYPE; i++)
4411 if (zone_secno == GET_SEC_FROM_SEG(sbi,
4412 CURSEG_I(sbi, i)->segno))
4413 return 0;
4414
4415 /*
4416 * Get last valid block of the zone.
4417 */
4418 last_valid_block = zone_block - 1;
4419 for (s = sbi->segs_per_sec - 1; s >= 0; s--) {
4420 segno = zone_segno + s;
4421 se = get_seg_entry(sbi, segno);
4422 for (b = sbi->blocks_per_seg - 1; b >= 0; b--)
4423 if (f2fs_test_bit(b, se->cur_valid_map)) {
4424 last_valid_block = START_BLOCK(sbi, segno) + b;
4425 break;
4426 }
4427 if (last_valid_block >= zone_block)
4428 break;
4429 }
4430
4431 /*
4432 * If last valid block is beyond the write pointer, report the
4433 * inconsistency. This inconsistency does not cause a write error
4434 * because the zone will not be selected for writes until
4435 * it gets discarded. Just report it.
4436 */
4437 if (last_valid_block >= wp_block) {
4438 f2fs_notice(sbi, "Valid block beyond write pointer: "
4439 "valid block[0x%x,0x%x] wp[0x%x,0x%x]",
4440 GET_SEGNO(sbi, last_valid_block),
4441 GET_BLKOFF_FROM_SEG0(sbi, last_valid_block),
4442 wp_segno, wp_blkoff);
4443 return 0;
4444 }
4445
4446 /*
4447 * If there is no valid block in the zone and the write pointer is
4448 * not at the zone start, reset the write pointer.
4449 */
4450 if (last_valid_block + 1 == zone_block && zone->wp != zone->start) {
4451 f2fs_notice(sbi,
4452 "Zone without valid block has non-zero write "
4453 "pointer. Reset the write pointer: wp[0x%x,0x%x]",
4454 wp_segno, wp_blkoff);
4455 ret = __f2fs_issue_discard_zone(sbi, fdev->bdev, zone_block,
4456 zone->len >> log_sectors_per_block);
4457 if (ret) {
4458 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4459 fdev->path, ret);
4460 return ret;
4461 }
4462 }
4463
4464 return 0;
4465}
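The zone checks above repeatedly convert between device sectors and filesystem blocks via log_sectors_per_block, then split a block address into a segment number and offset. A stand-alone sketch of that arithmetic with made-up geometry:

/*
 * Arithmetic sketch of mapping a zone write pointer (in sectors) to a
 * filesystem block and segment. 4KiB blocks over 512B sectors are an
 * assumption, as are the geometry numbers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int log_sectors_per_block = 3;	/* 4096 / 512 */
	unsigned long long zone_wp = 16384;	/* sectors, assumed */
	unsigned int start_blk = 512;		/* device offset, blocks */
	unsigned int blocks_per_seg = 512;

	unsigned int wp_block = start_blk +
			(unsigned int)(zone_wp >> log_sectors_per_block);
	unsigned int wp_segno = wp_block / blocks_per_seg;
	unsigned int wp_blkoff = wp_block % blocks_per_seg;

	printf("wp block %u -> seg %u off %u\n",
	       wp_block, wp_segno, wp_blkoff);
	return 0;
}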
4466
c426d991
SK
4467static struct f2fs_dev_info *get_target_zoned_dev(struct f2fs_sb_info *sbi,
4468 block_t zone_blkaddr)
4469{
4470 int i;
4471
4472 for (i = 0; i < sbi->s_ndevs; i++) {
4473 if (!bdev_is_zoned(FDEV(i).bdev))
4474 continue;
4475 if (sbi->s_ndevs == 1 || (FDEV(i).start_blk <= zone_blkaddr &&
4476 zone_blkaddr <= FDEV(i).end_blk))
4477 return &FDEV(i);
4478 }
4479
4480 return NULL;
4481}
4482
4483static int report_one_zone_cb(struct blk_zone *zone, unsigned int idx,
4484 void *data) {
4485 memcpy(data, zone, sizeof(struct blk_zone));
4486 return 0;
4487}
4488
4489static int fix_curseg_write_pointer(struct f2fs_sb_info *sbi, int type)
4490{
4491 struct curseg_info *cs = CURSEG_I(sbi, type);
4492 struct f2fs_dev_info *zbd;
4493 struct blk_zone zone;
4494 unsigned int cs_section, wp_segno, wp_blkoff, wp_sector_off;
4495 block_t cs_zone_block, wp_block;
4496 unsigned int log_sectors_per_block = sbi->log_blocksize - SECTOR_SHIFT;
4497 sector_t zone_sector;
4498 int err;
4499
4500 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4501 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4502
4503 zbd = get_target_zoned_dev(sbi, cs_zone_block);
4504 if (!zbd)
4505 return 0;
4506
4507 /* report zone for the sector the curseg points to */
4508 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4509 << log_sectors_per_block;
4510 err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4511 report_one_zone_cb, &zone);
4512 if (err != 1) {
4513 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4514 zbd->path, err);
4515 return err;
4516 }
4517
4518 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4519 return 0;
4520
4521 wp_block = zbd->start_blk + (zone.wp >> log_sectors_per_block);
4522 wp_segno = GET_SEGNO(sbi, wp_block);
4523 wp_blkoff = wp_block - START_BLOCK(sbi, wp_segno);
4524 wp_sector_off = zone.wp & GENMASK(log_sectors_per_block - 1, 0);
4525
4526 if (cs->segno == wp_segno && cs->next_blkoff == wp_blkoff &&
4527 wp_sector_off == 0)
4528 return 0;
4529
4530 f2fs_notice(sbi, "Unaligned curseg[%d] with write pointer: "
4531 "curseg[0x%x,0x%x] wp[0x%x,0x%x]",
4532 type, cs->segno, cs->next_blkoff, wp_segno, wp_blkoff);
4533
4534 f2fs_notice(sbi, "Assign new section to curseg[%d]: "
4535 "curseg[0x%x,0x%x]", type, cs->segno, cs->next_blkoff);
4536 allocate_segment_by_default(sbi, type, true);
4537
d508c94e
SK
4538 /* check consistency of the zone curseg pointed to */
4539 if (check_zone_write_pointer(sbi, zbd, &zone))
4540 return -EIO;
4541
c426d991
SK
4542 /* check newly assigned zone */
4543 cs_section = GET_SEC_FROM_SEG(sbi, cs->segno);
4544 cs_zone_block = START_BLOCK(sbi, GET_SEG_FROM_SEC(sbi, cs_section));
4545
4546 zbd = get_target_zoned_dev(sbi, cs_zone_block);
4547 if (!zbd)
4548 return 0;
4549
4550 zone_sector = (sector_t)(cs_zone_block - zbd->start_blk)
4551 << log_sectors_per_block;
4552 err = blkdev_report_zones(zbd->bdev, zone_sector, 1,
4553 report_one_zone_cb, &zone);
4554 if (err != 1) {
4555 f2fs_err(sbi, "Report zone failed: %s errno=(%d)",
4556 zbd->path, err);
4557 return err;
4558 }
4559
4560 if (zone.type != BLK_ZONE_TYPE_SEQWRITE_REQ)
4561 return 0;
4562
4563 if (zone.wp != zone.start) {
4564 f2fs_notice(sbi,
4565 "New zone for curseg[%d] is not yet discarded. "
4566 "Reset the zone: curseg[0x%x,0x%x]",
4567 type, cs->segno, cs->next_blkoff);
4568 err = __f2fs_issue_discard_zone(sbi, zbd->bdev,
4569 zone_sector >> log_sectors_per_block,
4570 zone.len >> log_sectors_per_block);
4571 if (err) {
4572 f2fs_err(sbi, "Discard zone failed: %s (errno=%d)",
4573 zbd->path, err);
4574 return err;
4575 }
4576 }
4577
4578 return 0;
4579}
4580
4581int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4582{
4583 int i, ret;
4584
4585 for (i = 0; i < NO_CHECK_TYPE; i++) {
4586 ret = fix_curseg_write_pointer(sbi, i);
4587 if (ret)
4588 return ret;
4589 }
4590
4591 return 0;
4592}
d508c94e
SK
4593
4594struct check_zone_write_pointer_args {
4595 struct f2fs_sb_info *sbi;
4596 struct f2fs_dev_info *fdev;
4597};
4598
4599static int check_zone_write_pointer_cb(struct blk_zone *zone, unsigned int idx,
4600 void *data) {
4601 struct check_zone_write_pointer_args *args;
4602 args = (struct check_zone_write_pointer_args *)data;
4603
4604 return check_zone_write_pointer(args->sbi, args->fdev, zone);
4605}
4606
4607int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4608{
4609 int i, ret;
4610 struct check_zone_write_pointer_args args;
4611
4612 for (i = 0; i < sbi->s_ndevs; i++) {
4613 if (!bdev_is_zoned(FDEV(i).bdev))
4614 continue;
4615
4616 args.sbi = sbi;
4617 args.fdev = &FDEV(i);
4618 ret = blkdev_report_zones(FDEV(i).bdev, 0, BLK_ALL_ZONES,
4619 check_zone_write_pointer_cb, &args);
4620 if (ret < 0)
4621 return ret;
4622 }
4623
4624 return 0;
4625}
c426d991
SK
4626#else
4627int f2fs_fix_curseg_write_pointer(struct f2fs_sb_info *sbi)
4628{
4629 return 0;
4630}
d508c94e
SK
4631
4632int f2fs_check_write_pointer(struct f2fs_sb_info *sbi)
4633{
4634 return 0;
4635}
c426d991
SK
4636#endif
4637
0a8165d7 4638/*
351df4b2
JK
4639 * Update min, max modified time for cost-benefit GC algorithm
4640 */
4641static void init_min_max_mtime(struct f2fs_sb_info *sbi)
4642{
4643 struct sit_info *sit_i = SIT_I(sbi);
4644 unsigned int segno;
4645
3d26fa6b 4646 down_write(&sit_i->sentry_lock);
351df4b2 4647
5ad25442 4648 sit_i->min_mtime = ULLONG_MAX;
351df4b2 4649
7cd8558b 4650 for (segno = 0; segno < MAIN_SEGS(sbi); segno += sbi->segs_per_sec) {
351df4b2
JK
4651 unsigned int i;
4652 unsigned long long mtime = 0;
4653
4654 for (i = 0; i < sbi->segs_per_sec; i++)
4655 mtime += get_seg_entry(sbi, segno + i)->mtime;
4656
4657 mtime = div_u64(mtime, sbi->segs_per_sec);
4658
4659 if (sit_i->min_mtime > mtime)
4660 sit_i->min_mtime = mtime;
4661 }
a1f72ac2 4662 sit_i->max_mtime = get_mtime(sbi, false);
3d26fa6b 4663 up_write(&sit_i->sentry_lock);
351df4b2
JK
4664}
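A section's modified time is taken as the mean of its segments' mtimes, and the minimum over all sections seeds the cost-benefit GC range. A small numeric sketch with invented inputs:

/*
 * Sketch of the per-section mtime averaging used to seed min_mtime.
 * Segment counts and times are made-up inputs.
 */
#include <stdio.h>

#define SEGS_PER_SEC 4

int main(void)
{
	unsigned long long mtime[2][SEGS_PER_SEC] = {
		{ 100, 120, 110, 130 },		/* section 0 */
		{ 40, 60, 50, 70 },		/* section 1 */
	};
	unsigned long long min_mtime = ~0ULL;
	int s, i;

	for (s = 0; s < 2; s++) {
		unsigned long long sum = 0;

		for (i = 0; i < SEGS_PER_SEC; i++)
			sum += mtime[s][i];
		sum /= SEGS_PER_SEC;		/* section average */
		if (sum < min_mtime)
			min_mtime = sum;
	}
	printf("min section mtime: %llu\n", min_mtime);	/* 55 */
	return 0;
}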
4665
4d57b86d 4666int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
351df4b2
JK
4667{
4668 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
4669 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
1042d60f 4670 struct f2fs_sm_info *sm_info;
351df4b2
JK
4671 int err;
4672
acbf054d 4673 sm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_sm_info), GFP_KERNEL);
351df4b2
JK
4674 if (!sm_info)
4675 return -ENOMEM;
4676
4677 /* init sm info */
4678 sbi->sm_info = sm_info;
351df4b2
JK
4679 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
4680 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
4681 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
4682 sm_info->reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);
4683 sm_info->ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
4684 sm_info->main_segments = le32_to_cpu(raw_super->segment_count_main);
4685 sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
58c41035
JK
4686 sm_info->rec_prefree_segments = sm_info->main_segments *
4687 DEF_RECLAIM_PREFREE_SEGMENTS / 100;
44a83499
JK
4688 if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
4689 sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
4690
b0332a0f 4691 if (!f2fs_lfs_mode(sbi))
52763a4b 4692 sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
216fbd64 4693 sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
c1ce1b02 4694 sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
853137ce 4695 sm_info->min_seq_blocks = sbi->blocks_per_seg * sbi->segs_per_sec;
ef095d19 4696 sm_info->min_hot_blocks = DEF_MIN_HOT_BLOCKS;
a2a12b67 4697 sm_info->min_ssr_sections = reserved_sections(sbi);
351df4b2 4698
184a5cd2
CY
4699 INIT_LIST_HEAD(&sm_info->sit_entry_set);
4700
2b60311d
CY
4701 init_rwsem(&sm_info->curseg_lock);
4702
d4fdf8ba 4703 if (!f2fs_readonly(sbi->sb)) {
4d57b86d 4704 err = f2fs_create_flush_cmd_control(sbi);
2163d198 4705 if (err)
a688b9d9 4706 return err;
6b4afdd7
JK
4707 }
4708
0b54fb84
JK
4709 err = create_discard_cmd_control(sbi);
4710 if (err)
4711 return err;
4712
351df4b2
JK
4713 err = build_sit_info(sbi);
4714 if (err)
4715 return err;
4716 err = build_free_segmap(sbi);
4717 if (err)
4718 return err;
4719 err = build_curseg(sbi);
4720 if (err)
4721 return err;
4722
4723 /* reinit free segmap based on SIT */
c39a1b34
JK
4724 err = build_sit_entries(sbi);
4725 if (err)
4726 return err;
351df4b2
JK
4727
4728 init_free_segmap(sbi);
4729 err = build_dirty_segmap(sbi);
4730 if (err)
4731 return err;
4732
c854f4d6
CY
4733 err = sanity_check_curseg(sbi);
4734 if (err)
4735 return err;
4736
351df4b2
JK
4737 init_min_max_mtime(sbi);
4738 return 0;
4739}
4740
4741static void discard_dirty_segmap(struct f2fs_sb_info *sbi,
4742 enum dirty_type dirty_type)
4743{
4744 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4745
4746 mutex_lock(&dirty_i->seglist_lock);
39307a8e 4747 kvfree(dirty_i->dirty_segmap[dirty_type]);
351df4b2
JK
4748 dirty_i->nr_dirty[dirty_type] = 0;
4749 mutex_unlock(&dirty_i->seglist_lock);
4750}
4751
5ec4e49f 4752static void destroy_victim_secmap(struct f2fs_sb_info *sbi)
351df4b2
JK
4753{
4754 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
39307a8e 4755 kvfree(dirty_i->victim_secmap);
351df4b2
JK
4756}
4757
4758static void destroy_dirty_segmap(struct f2fs_sb_info *sbi)
4759{
4760 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
4761 int i;
4762
4763 if (!dirty_i)
4764 return;
4765
4766 /* discard pre-free/dirty segments list */
4767 for (i = 0; i < NR_DIRTY_TYPE; i++)
4768 discard_dirty_segmap(sbi, i);
4769
5ec4e49f 4770 destroy_victim_secmap(sbi);
351df4b2 4771 SM_I(sbi)->dirty_info = NULL;
5222595d 4772 kvfree(dirty_i);
351df4b2
JK
4773}
4774
4775static void destroy_curseg(struct f2fs_sb_info *sbi)
4776{
4777 struct curseg_info *array = SM_I(sbi)->curseg_array;
4778 int i;
4779
4780 if (!array)
4781 return;
4782 SM_I(sbi)->curseg_array = NULL;
b7ad7512 4783 for (i = 0; i < NR_CURSEG_TYPE; i++) {
5222595d
JK
4784 kvfree(array[i].sum_blk);
4785 kvfree(array[i].journal);
b7ad7512 4786 }
5222595d 4787 kvfree(array);
351df4b2
JK
4788}
4789
4790static void destroy_free_segmap(struct f2fs_sb_info *sbi)
4791{
4792 struct free_segmap_info *free_i = SM_I(sbi)->free_info;
4793 if (!free_i)
4794 return;
4795 SM_I(sbi)->free_info = NULL;
39307a8e
JK
4796 kvfree(free_i->free_segmap);
4797 kvfree(free_i->free_secmap);
5222595d 4798 kvfree(free_i);
351df4b2
JK
4799}
4800
4801static void destroy_sit_info(struct f2fs_sb_info *sbi)
4802{
4803 struct sit_info *sit_i = SIT_I(sbi);
351df4b2
JK
4804
4805 if (!sit_i)
4806 return;
4807
2fde3dd1
CY
4808 if (sit_i->sentries)
4809 kvfree(sit_i->bitmap);
5222595d 4810 kvfree(sit_i->tmp_map);
60a3b782 4811
39307a8e
JK
4812 kvfree(sit_i->sentries);
4813 kvfree(sit_i->sec_entries);
4814 kvfree(sit_i->dirty_sentries_bitmap);
351df4b2
JK
4815
4816 SM_I(sbi)->sit_info = NULL;
5222595d 4817 kvfree(sit_i->sit_bitmap);
ae27d62e 4818#ifdef CONFIG_F2FS_CHECK_FS
5222595d 4819 kvfree(sit_i->sit_bitmap_mir);
bbf9f7d9 4820 kvfree(sit_i->invalid_segmap);
ae27d62e 4821#endif
5222595d 4822 kvfree(sit_i);
351df4b2
JK
4823}
4824
4d57b86d 4825void f2fs_destroy_segment_manager(struct f2fs_sb_info *sbi)
351df4b2
JK
4826{
4827 struct f2fs_sm_info *sm_info = SM_I(sbi);
a688b9d9 4828
3b03f724
CY
4829 if (!sm_info)
4830 return;
4d57b86d 4831 f2fs_destroy_flush_cmd_control(sbi, true);
f099405f 4832 destroy_discard_cmd_control(sbi);
351df4b2
JK
4833 destroy_dirty_segmap(sbi);
4834 destroy_curseg(sbi);
4835 destroy_free_segmap(sbi);
4836 destroy_sit_info(sbi);
4837 sbi->sm_info = NULL;
5222595d 4838 kvfree(sm_info);
351df4b2 4839}
7fd9e544 4840
4d57b86d 4841int __init f2fs_create_segment_manager_caches(void)
7fd9e544 4842{
98510003 4843 discard_entry_slab = f2fs_kmem_cache_create("f2fs_discard_entry",
e8512d2e 4844 sizeof(struct discard_entry));
7fd9e544 4845 if (!discard_entry_slab)
184a5cd2
CY
4846 goto fail;
4847
98510003 4848 discard_cmd_slab = f2fs_kmem_cache_create("f2fs_discard_cmd",
b01a9201
JK
4849 sizeof(struct discard_cmd));
4850 if (!discard_cmd_slab)
6ab2a308 4851 goto destroy_discard_entry;
275b66b0 4852
98510003 4853 sit_entry_set_slab = f2fs_kmem_cache_create("f2fs_sit_entry_set",
c9ee0085 4854 sizeof(struct sit_entry_set));
184a5cd2 4855 if (!sit_entry_set_slab)
b01a9201 4856 goto destroy_discard_cmd;
88b88a66 4857
98510003 4858 inmem_entry_slab = f2fs_kmem_cache_create("f2fs_inmem_page_entry",
88b88a66
JK
4859 sizeof(struct inmem_pages));
4860 if (!inmem_entry_slab)
4861 goto destroy_sit_entry_set;
7fd9e544 4862 return 0;
184a5cd2 4863
88b88a66
JK
4864destroy_sit_entry_set:
4865 kmem_cache_destroy(sit_entry_set_slab);
b01a9201
JK
4866destroy_discard_cmd:
4867 kmem_cache_destroy(discard_cmd_slab);
6ab2a308 4868destroy_discard_entry:
184a5cd2
CY
4869 kmem_cache_destroy(discard_entry_slab);
4870fail:
4871 return -ENOMEM;
7fd9e544
JK
4872}
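The error path above is the usual goto-unwind pattern: each failed step releases everything created before it, in reverse order. A user-space sketch of the same shape, using malloc in place of slab caches:

/*
 * Sketch of goto-unwind error handling: later failures free earlier
 * allocations in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

static void *a, *b, *c;

static int create_caches(void)
{
	a = malloc(32);
	if (!a)
		goto fail;
	b = malloc(32);
	if (!b)
		goto free_a;
	c = malloc(32);
	if (!c)
		goto free_b;
	return 0;

free_b:
	free(b);
free_a:
	free(a);
fail:
	return -1;
}

int main(void)
{
	if (create_caches())
		return 1;
	printf("caches ready\n");
	free(c);
	free(b);
	free(a);
	return 0;
}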
4873
4d57b86d 4874void f2fs_destroy_segment_manager_caches(void)
7fd9e544 4875{
184a5cd2 4876 kmem_cache_destroy(sit_entry_set_slab);
b01a9201 4877 kmem_cache_destroy(discard_cmd_slab);
7fd9e544 4878 kmem_cache_destroy(discard_entry_slab);
88b88a66 4879 kmem_cache_destroy(inmem_entry_slab);
7fd9e544 4880}