f2fs: fix missing up_read
[mirror_ubuntu-jammy-kernel.git] / fs/f2fs/node.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * fs/f2fs/node.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
7 */
8 #include <linux/fs.h>
9 #include <linux/f2fs_fs.h>
10 #include <linux/mpage.h>
11 #include <linux/backing-dev.h>
12 #include <linux/blkdev.h>
13 #include <linux/pagevec.h>
14 #include <linux/swap.h>
15
16 #include "f2fs.h"
17 #include "node.h"
18 #include "segment.h"
19 #include "xattr.h"
20 #include "trace.h"
21 #include <trace/events/f2fs.h>
22
23 #define on_f2fs_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
24
25 static struct kmem_cache *nat_entry_slab;
26 static struct kmem_cache *free_nid_slab;
27 static struct kmem_cache *nat_entry_set_slab;
28 static struct kmem_cache *fsync_node_entry_slab;
29
30 /*
31 * Check whether the given nid is within node id range.
32 */
33 int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
34 {
35 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
36 set_sbi_flag(sbi, SBI_NEED_FSCK);
37 f2fs_msg(sbi->sb, KERN_WARNING,
38 "%s: out-of-range nid=%x, run fsck to fix.",
39 __func__, nid);
40 return -EINVAL;
41 }
42 return 0;
43 }
44
45 bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
46 {
47 struct f2fs_nm_info *nm_i = NM_I(sbi);
48 struct sysinfo val;
49 unsigned long avail_ram;
50 unsigned long mem_size = 0;
51 bool res = false;
52
53 si_meminfo(&val);
54
55 /* only uses low memory */
56 avail_ram = val.totalram - val.totalhigh;
57
58 /*
59 * give 25%, 25%, 50%, 50%, 50% memory to each component respectively
60 */
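/*
 * Worked example (assuming 4 KiB pages and the default ram_thresh of 1):
 * with 1 GiB of low memory, avail_ram = 262144 pages, so the 25% classes
 * (FREE_NIDS, NAT_ENTRIES) may cache up to (262144 * 1 / 100) >> 2 = 655
 * pages of objects, and the 50% classes up to 1310 pages.
 */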
61 if (type == FREE_NIDS) {
62 mem_size = (nm_i->nid_cnt[FREE_NID] *
63 sizeof(struct free_nid)) >> PAGE_SHIFT;
64 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
65 } else if (type == NAT_ENTRIES) {
66 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
67 PAGE_SHIFT;
68 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
69 if (excess_cached_nats(sbi))
70 res = false;
71 } else if (type == DIRTY_DENTS) {
72 if (sbi->sb->s_bdi->wb.dirty_exceeded)
73 return false;
74 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
75 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
76 } else if (type == INO_ENTRIES) {
77 int i;
78
79 for (i = 0; i < MAX_INO_ENTRY; i++)
80 mem_size += sbi->im[i].ino_num *
81 sizeof(struct ino_entry);
82 mem_size >>= PAGE_SHIFT;
83 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
84 } else if (type == EXTENT_CACHE) {
85 mem_size = (atomic_read(&sbi->total_ext_tree) *
86 sizeof(struct extent_tree) +
87 atomic_read(&sbi->total_ext_node) *
88 sizeof(struct extent_node)) >> PAGE_SHIFT;
89 res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
90 } else if (type == INMEM_PAGES) {
91 /* allow at most 20% of total_ram for inmemory pages */
92 mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
93 res = mem_size < (val.totalram / 5);
94 } else {
95 if (!sbi->sb->s_bdi->wb.dirty_exceeded)
96 return true;
97 }
98 return res;
99 }
100
101 static void clear_node_page_dirty(struct page *page)
102 {
103 if (PageDirty(page)) {
104 f2fs_clear_radix_tree_dirty_tag(page);
105 clear_page_dirty_for_io(page);
106 dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
107 }
108 ClearPageUptodate(page);
109 }
110
111 static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
112 {
113 return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
114 }
115
116 static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
117 {
118 struct page *src_page;
119 struct page *dst_page;
120 pgoff_t dst_off;
121 void *src_addr;
122 void *dst_addr;
123 struct f2fs_nm_info *nm_i = NM_I(sbi);
124
125 dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
126
127 /* get current nat block page with lock */
128 src_page = get_current_nat_page(sbi, nid);
129 if (IS_ERR(src_page))
130 return src_page;
131 dst_page = f2fs_grab_meta_page(sbi, dst_off);
132 f2fs_bug_on(sbi, PageDirty(src_page));
133
134 src_addr = page_address(src_page);
135 dst_addr = page_address(dst_page);
136 memcpy(dst_addr, src_addr, PAGE_SIZE);
137 set_page_dirty(dst_page);
138 f2fs_put_page(src_page, 1);
139
140 set_to_next_nat(nm_i, nid);
141
142 return dst_page;
143 }
144
145 static struct nat_entry *__alloc_nat_entry(nid_t nid, bool no_fail)
146 {
147 struct nat_entry *new;
148
149 if (no_fail)
150 new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
151 else
152 new = kmem_cache_alloc(nat_entry_slab, GFP_F2FS_ZERO);
153 if (new) {
154 nat_set_nid(new, nid);
155 nat_reset_flag(new);
156 }
157 return new;
158 }
159
160 static void __free_nat_entry(struct nat_entry *e)
161 {
162 kmem_cache_free(nat_entry_slab, e);
163 }
164
165 /* must be locked by nat_tree_lock */
166 static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
167 struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
168 {
169 if (no_fail)
170 f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
171 else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
172 return NULL;
173
174 if (raw_ne)
175 node_info_from_raw_nat(&ne->ni, raw_ne);
176
177 spin_lock(&nm_i->nat_list_lock);
178 list_add_tail(&ne->list, &nm_i->nat_entries);
179 spin_unlock(&nm_i->nat_list_lock);
180
181 nm_i->nat_cnt++;
182 return ne;
183 }
184
185 static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
186 {
187 struct nat_entry *ne;
188
189 ne = radix_tree_lookup(&nm_i->nat_root, n);
190
191 /* for a recently accessed nat entry, move it to the tail of the lru list */
192 if (ne && !get_nat_flag(ne, IS_DIRTY)) {
193 spin_lock(&nm_i->nat_list_lock);
194 if (!list_empty(&ne->list))
195 list_move_tail(&ne->list, &nm_i->nat_entries);
196 spin_unlock(&nm_i->nat_list_lock);
197 }
198
199 return ne;
200 }
201
202 static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
203 nid_t start, unsigned int nr, struct nat_entry **ep)
204 {
205 return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
206 }
207
208 static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
209 {
210 radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
211 nm_i->nat_cnt--;
212 __free_nat_entry(e);
213 }
214
215 static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
216 struct nat_entry *ne)
217 {
218 nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
219 struct nat_entry_set *head;
220
221 head = radix_tree_lookup(&nm_i->nat_set_root, set);
222 if (!head) {
223 head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
224
225 INIT_LIST_HEAD(&head->entry_list);
226 INIT_LIST_HEAD(&head->set_list);
227 head->set = set;
228 head->entry_cnt = 0;
229 f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
230 }
231 return head;
232 }
233
234 static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
235 struct nat_entry *ne)
236 {
237 struct nat_entry_set *head;
238 bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;
239
240 if (!new_ne)
241 head = __grab_nat_entry_set(nm_i, ne);
242
243 /*
244 * update entry_cnt in the conditions below:
245 * 1. update NEW_ADDR to a valid block address;
246 * 2. update an old block address to a new one;
247 */
248 if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
249 !get_nat_flag(ne, IS_DIRTY)))
250 head->entry_cnt++;
251
252 set_nat_flag(ne, IS_PREALLOC, new_ne);
253
254 if (get_nat_flag(ne, IS_DIRTY))
255 goto refresh_list;
256
257 nm_i->dirty_nat_cnt++;
258 set_nat_flag(ne, IS_DIRTY, true);
259 refresh_list:
260 spin_lock(&nm_i->nat_list_lock);
261 if (new_ne)
262 list_del_init(&ne->list);
263 else
264 list_move_tail(&ne->list, &head->entry_list);
265 spin_unlock(&nm_i->nat_list_lock);
266 }
267
268 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
269 struct nat_entry_set *set, struct nat_entry *ne)
270 {
271 spin_lock(&nm_i->nat_list_lock);
272 list_move_tail(&ne->list, &nm_i->nat_entries);
273 spin_unlock(&nm_i->nat_list_lock);
274
275 set_nat_flag(ne, IS_DIRTY, false);
276 set->entry_cnt--;
277 nm_i->dirty_nat_cnt--;
278 }
279
280 static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
281 nid_t start, unsigned int nr, struct nat_entry_set **ep)
282 {
283 return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
284 start, nr);
285 }
286
287 bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
288 {
289 return NODE_MAPPING(sbi) == page->mapping &&
290 IS_DNODE(page) && is_cold_node(page);
291 }
292
293 void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
294 {
295 spin_lock_init(&sbi->fsync_node_lock);
296 INIT_LIST_HEAD(&sbi->fsync_node_list);
297 sbi->fsync_seg_id = 0;
298 sbi->fsync_node_num = 0;
299 }
300
301 static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
302 struct page *page)
303 {
304 struct fsync_node_entry *fn;
305 unsigned long flags;
306 unsigned int seq_id;
307
308 fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);
309
310 get_page(page);
311 fn->page = page;
312 INIT_LIST_HEAD(&fn->list);
313
314 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
315 list_add_tail(&fn->list, &sbi->fsync_node_list);
316 fn->seq_id = sbi->fsync_seg_id++;
317 seq_id = fn->seq_id;
318 sbi->fsync_node_num++;
319 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
320
321 return seq_id;
322 }
323
324 void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
325 {
326 struct fsync_node_entry *fn;
327 unsigned long flags;
328
329 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
330 list_for_each_entry(fn, &sbi->fsync_node_list, list) {
331 if (fn->page == page) {
332 list_del(&fn->list);
333 sbi->fsync_node_num--;
334 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
335 kmem_cache_free(fsync_node_entry_slab, fn);
336 put_page(page);
337 return;
338 }
339 }
340 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
341 f2fs_bug_on(sbi, 1);
342 }
343
344 void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
345 {
346 unsigned long flags;
347
348 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
349 sbi->fsync_seg_id = 0;
350 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
351 }
352
353 int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
354 {
355 struct f2fs_nm_info *nm_i = NM_I(sbi);
356 struct nat_entry *e;
357 bool need = false;
358
359 down_read(&nm_i->nat_tree_lock);
360 e = __lookup_nat_cache(nm_i, nid);
361 if (e) {
362 if (!get_nat_flag(e, IS_CHECKPOINTED) &&
363 !get_nat_flag(e, HAS_FSYNCED_INODE))
364 need = true;
365 }
366 up_read(&nm_i->nat_tree_lock);
367 return need;
368 }
369
370 bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
371 {
372 struct f2fs_nm_info *nm_i = NM_I(sbi);
373 struct nat_entry *e;
374 bool is_cp = true;
375
376 down_read(&nm_i->nat_tree_lock);
377 e = __lookup_nat_cache(nm_i, nid);
378 if (e && !get_nat_flag(e, IS_CHECKPOINTED))
379 is_cp = false;
380 up_read(&nm_i->nat_tree_lock);
381 return is_cp;
382 }
383
384 bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
385 {
386 struct f2fs_nm_info *nm_i = NM_I(sbi);
387 struct nat_entry *e;
388 bool need_update = true;
389
390 down_read(&nm_i->nat_tree_lock);
391 e = __lookup_nat_cache(nm_i, ino);
392 if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
393 (get_nat_flag(e, IS_CHECKPOINTED) ||
394 get_nat_flag(e, HAS_FSYNCED_INODE)))
395 need_update = false;
396 up_read(&nm_i->nat_tree_lock);
397 return need_update;
398 }
399
400 /* must be locked by nat_tree_lock */
401 static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
402 struct f2fs_nat_entry *ne)
403 {
404 struct f2fs_nm_info *nm_i = NM_I(sbi);
405 struct nat_entry *new, *e;
406
407 new = __alloc_nat_entry(nid, false);
408 if (!new)
409 return;
410
411 down_write(&nm_i->nat_tree_lock);
412 e = __lookup_nat_cache(nm_i, nid);
413 if (!e)
414 e = __init_nat_entry(nm_i, new, ne, false);
415 else
416 f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
417 nat_get_blkaddr(e) !=
418 le32_to_cpu(ne->block_addr) ||
419 nat_get_version(e) != ne->version);
420 up_write(&nm_i->nat_tree_lock);
421 if (e != new)
422 __free_nat_entry(new);
423 }
424
425 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
426 block_t new_blkaddr, bool fsync_done)
427 {
428 struct f2fs_nm_info *nm_i = NM_I(sbi);
429 struct nat_entry *e;
430 struct nat_entry *new = __alloc_nat_entry(ni->nid, true);
431
432 down_write(&nm_i->nat_tree_lock);
433 e = __lookup_nat_cache(nm_i, ni->nid);
434 if (!e) {
435 e = __init_nat_entry(nm_i, new, NULL, true);
436 copy_node_info(&e->ni, ni);
437 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
438 } else if (new_blkaddr == NEW_ADDR) {
439 /*
440 * when a nid is reallocated,
441 * the previous nat entry can remain in the nat cache.
442 * So, reinitialize it with new information.
443 */
444 copy_node_info(&e->ni, ni);
445 f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
446 }
447 /* let's free early to reduce memory consumption */
448 if (e != new)
449 __free_nat_entry(new);
450
451 /* sanity check */
452 f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
453 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
454 new_blkaddr == NULL_ADDR);
455 f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
456 new_blkaddr == NEW_ADDR);
457 f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
458 new_blkaddr == NEW_ADDR);
459
460 /* increment version no as node is removed */
461 if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
462 unsigned char version = nat_get_version(e);
463 nat_set_version(e, inc_node_version(version));
464 }
465
466 /* change address */
467 nat_set_blkaddr(e, new_blkaddr);
468 if (!is_valid_data_blkaddr(sbi, new_blkaddr))
469 set_nat_flag(e, IS_CHECKPOINTED, false);
470 __set_nat_cache_dirty(nm_i, e);
471
472 /* update fsync_mark if its inode nat entry is still alive */
473 if (ni->nid != ni->ino)
474 e = __lookup_nat_cache(nm_i, ni->ino);
475 if (e) {
476 if (fsync_done && ni->nid == ni->ino)
477 set_nat_flag(e, HAS_FSYNCED_INODE, true);
478 set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
479 }
480 up_write(&nm_i->nat_tree_lock);
481 }
482
483 int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
484 {
485 struct f2fs_nm_info *nm_i = NM_I(sbi);
486 int nr = nr_shrink;
487
488 if (!down_write_trylock(&nm_i->nat_tree_lock))
489 return 0;
490
491 spin_lock(&nm_i->nat_list_lock);
492 while (nr_shrink) {
493 struct nat_entry *ne;
494
495 if (list_empty(&nm_i->nat_entries))
496 break;
497
498 ne = list_first_entry(&nm_i->nat_entries,
499 struct nat_entry, list);
500 list_del(&ne->list);
501 spin_unlock(&nm_i->nat_list_lock);
502
503 __del_from_nat_cache(nm_i, ne);
504 nr_shrink--;
505
506 spin_lock(&nm_i->nat_list_lock);
507 }
508 spin_unlock(&nm_i->nat_list_lock);
509
510 up_write(&nm_i->nat_tree_lock);
511 return nr - nr_shrink;
512 }
513
514 /*
515 * Look up node_info in the nat cache, then the curseg journal, then the
516 * on-disk NAT block. Returns 0, or an error from the NAT page read. */
517 int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
518 struct node_info *ni)
519 {
520 struct f2fs_nm_info *nm_i = NM_I(sbi);
521 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
522 struct f2fs_journal *journal = curseg->journal;
523 nid_t start_nid = START_NID(nid);
524 struct f2fs_nat_block *nat_blk;
525 struct page *page = NULL;
526 struct f2fs_nat_entry ne;
527 struct nat_entry *e;
528 pgoff_t index;
529 int i;
530
531 ni->nid = nid;
532
533 /* Check nat cache */
534 down_read(&nm_i->nat_tree_lock);
535 e = __lookup_nat_cache(nm_i, nid);
536 if (e) {
537 ni->ino = nat_get_ino(e);
538 ni->blk_addr = nat_get_blkaddr(e);
539 ni->version = nat_get_version(e);
540 up_read(&nm_i->nat_tree_lock);
541 return 0;
542 }
543
544 memset(&ne, 0, sizeof(struct f2fs_nat_entry));
545
546 /* Check current segment summary */
547 down_read(&curseg->journal_rwsem);
548 i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
549 if (i >= 0) {
550 ne = nat_in_journal(journal, i);
551 node_info_from_raw_nat(ni, &ne);
552 }
553 up_read(&curseg->journal_rwsem);
554 if (i >= 0) {
555 up_read(&nm_i->nat_tree_lock);
556 goto cache;
557 }
558
559 /* Fill node_info from nat page */
560 index = current_nat_addr(sbi, nid);
561 up_read(&nm_i->nat_tree_lock);
562
563 page = f2fs_get_meta_page(sbi, index);
564 if (IS_ERR(page))
565 return PTR_ERR(page);
566
567 nat_blk = (struct f2fs_nat_block *)page_address(page);
568 ne = nat_blk->entries[nid - start_nid];
569 node_info_from_raw_nat(ni, &ne);
570 f2fs_put_page(page, 1);
571 cache:
572 /* cache nat entry */
573 cache_nat_entry(sbi, nid, &ne);
574 return 0;
575 }
576
577 /*
578 * readahead up to n (capped at NIDS_PER_BLOCK) sibling node pages.
579 */
580 static void f2fs_ra_node_pages(struct page *parent, int start, int n)
581 {
582 struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
583 struct blk_plug plug;
584 int i, end;
585 nid_t nid;
586
587 blk_start_plug(&plug);
588
589 /* try readahead for siblings of the desired node */
590 end = start + n;
591 end = min(end, NIDS_PER_BLOCK);
592 for (i = start; i < end; i++) {
593 nid = get_nid(parent, i, false);
594 f2fs_ra_node_page(sbi, nid);
595 }
596
597 blk_finish_plug(&plug);
598 }
599
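/*
 * Note: after a lookup stops at dn->cur_level, this returns the first file
 * offset addressed by the next node at that level, i.e. (pgofs - base)
 * rounded up to the next multiple of skipped_unit; callers can use it to
 * skip over a whole hole at once.
 */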
600 pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
601 {
602 const long direct_index = ADDRS_PER_INODE(dn->inode);
603 const long direct_blks = ADDRS_PER_BLOCK;
604 const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
605 unsigned int skipped_unit = ADDRS_PER_BLOCK;
606 int cur_level = dn->cur_level;
607 int max_level = dn->max_level;
608 pgoff_t base = 0;
609
610 if (!dn->max_level)
611 return pgofs + 1;
612
613 while (max_level-- > cur_level)
614 skipped_unit *= NIDS_PER_BLOCK;
615
616 switch (dn->max_level) {
617 case 3:
618 base += 2 * indirect_blks;
619 case 2:
620 base += 2 * direct_blks;
621 case 1:
622 base += direct_index;
623 break;
624 default:
625 f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
626 }
627
628 return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
629 }
630
631 /*
632 * The maximum depth is four.
633 * Offset[0] will have raw inode offset.
634 */
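/*
 * Worked example (assuming the usual DEF_ADDRS_PER_INODE == 923 and
 * ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018):
 * - block 1000: 1000 - 923 = 77 < 1018, so it sits in the first direct
 *   node (NODE_DIR1_BLOCK) at offset 77 -> level 1.
 * - block 3000: 3000 - 923 - 1018 - 1018 = 41, so it sits under the first
 *   indirect node (NODE_IND1_BLOCK), child 41 / 1018 = 0, slot 41 -> level 2.
 */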
635 static int get_node_path(struct inode *inode, long block,
636 int offset[4], unsigned int noffset[4])
637 {
638 const long direct_index = ADDRS_PER_INODE(inode);
639 const long direct_blks = ADDRS_PER_BLOCK;
640 const long dptrs_per_blk = NIDS_PER_BLOCK;
641 const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
642 const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
643 int n = 0;
644 int level = 0;
645
646 noffset[0] = 0;
647
648 if (block < direct_index) {
649 offset[n] = block;
650 goto got;
651 }
652 block -= direct_index;
653 if (block < direct_blks) {
654 offset[n++] = NODE_DIR1_BLOCK;
655 noffset[n] = 1;
656 offset[n] = block;
657 level = 1;
658 goto got;
659 }
660 block -= direct_blks;
661 if (block < direct_blks) {
662 offset[n++] = NODE_DIR2_BLOCK;
663 noffset[n] = 2;
664 offset[n] = block;
665 level = 1;
666 goto got;
667 }
668 block -= direct_blks;
669 if (block < indirect_blks) {
670 offset[n++] = NODE_IND1_BLOCK;
671 noffset[n] = 3;
672 offset[n++] = block / direct_blks;
673 noffset[n] = 4 + offset[n - 1];
674 offset[n] = block % direct_blks;
675 level = 2;
676 goto got;
677 }
678 block -= indirect_blks;
679 if (block < indirect_blks) {
680 offset[n++] = NODE_IND2_BLOCK;
681 noffset[n] = 4 + dptrs_per_blk;
682 offset[n++] = block / direct_blks;
683 noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
684 offset[n] = block % direct_blks;
685 level = 2;
686 goto got;
687 }
688 block -= indirect_blks;
689 if (block < dindirect_blks) {
690 offset[n++] = NODE_DIND_BLOCK;
691 noffset[n] = 5 + (dptrs_per_blk * 2);
692 offset[n++] = block / indirect_blks;
693 noffset[n] = 6 + (dptrs_per_blk * 2) +
694 offset[n - 1] * (dptrs_per_blk + 1);
695 offset[n++] = (block / direct_blks) % dptrs_per_blk;
696 noffset[n] = 7 + (dptrs_per_blk * 2) +
697 offset[n - 2] * (dptrs_per_blk + 1) +
698 offset[n - 1];
699 offset[n] = block % direct_blks;
700 level = 3;
701 goto got;
702 } else {
703 return -E2BIG;
704 }
705 got:
706 return level;
707 }
708
709 /*
710 * Caller should call f2fs_put_dnode(dn).
711 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
712 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
713 * In the LOOKUP_NODE(_RA) cases, we don't need to care about the rwsem.
714 */
715 int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
716 {
717 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
718 struct page *npage[4];
719 struct page *parent = NULL;
720 int offset[4];
721 unsigned int noffset[4];
722 nid_t nids[4];
723 int level, i = 0;
724 int err = 0;
725
726 level = get_node_path(dn->inode, index, offset, noffset);
727 if (level < 0)
728 return level;
729
730 nids[0] = dn->inode->i_ino;
731 npage[0] = dn->inode_page;
732
733 if (!npage[0]) {
734 npage[0] = f2fs_get_node_page(sbi, nids[0]);
735 if (IS_ERR(npage[0]))
736 return PTR_ERR(npage[0]);
737 }
738
739 /* if inline_data is set, should not report any block indices */
740 if (f2fs_has_inline_data(dn->inode) && index) {
741 err = -ENOENT;
742 f2fs_put_page(npage[0], 1);
743 goto release_out;
744 }
745
746 parent = npage[0];
747 if (level != 0)
748 nids[1] = get_nid(parent, offset[0], true);
749 dn->inode_page = npage[0];
750 dn->inode_page_locked = true;
751
752 /* get indirect or direct nodes */
753 for (i = 1; i <= level; i++) {
754 bool done = false;
755
756 if (!nids[i] && mode == ALLOC_NODE) {
757 /* alloc new node */
758 if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
759 err = -ENOSPC;
760 goto release_pages;
761 }
762
763 dn->nid = nids[i];
764 npage[i] = f2fs_new_node_page(dn, noffset[i]);
765 if (IS_ERR(npage[i])) {
766 f2fs_alloc_nid_failed(sbi, nids[i]);
767 err = PTR_ERR(npage[i]);
768 goto release_pages;
769 }
770
771 set_nid(parent, offset[i - 1], nids[i], i == 1);
772 f2fs_alloc_nid_done(sbi, nids[i]);
773 done = true;
774 } else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
775 npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
776 if (IS_ERR(npage[i])) {
777 err = PTR_ERR(npage[i]);
778 goto release_pages;
779 }
780 done = true;
781 }
782 if (i == 1) {
783 dn->inode_page_locked = false;
784 unlock_page(parent);
785 } else {
786 f2fs_put_page(parent, 1);
787 }
788
789 if (!done) {
790 npage[i] = f2fs_get_node_page(sbi, nids[i]);
791 if (IS_ERR(npage[i])) {
792 err = PTR_ERR(npage[i]);
793 f2fs_put_page(npage[0], 0);
794 goto release_out;
795 }
796 }
797 if (i < level) {
798 parent = npage[i];
799 nids[i + 1] = get_nid(parent, offset[i], false);
800 }
801 }
802 dn->nid = nids[level];
803 dn->ofs_in_node = offset[level];
804 dn->node_page = npage[level];
805 dn->data_blkaddr = datablock_addr(dn->inode,
806 dn->node_page, dn->ofs_in_node);
807 return 0;
808
809 release_pages:
810 f2fs_put_page(parent, 1);
811 if (i > 1)
812 f2fs_put_page(npage[0], 0);
813 release_out:
814 dn->inode_page = NULL;
815 dn->node_page = NULL;
816 if (err == -ENOENT) {
817 dn->cur_level = i;
818 dn->max_level = level;
819 dn->ofs_in_node = offset[level];
820 }
821 return err;
822 }
823
824 static int truncate_node(struct dnode_of_data *dn)
825 {
826 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
827 struct node_info ni;
828 int err;
pgoff_t index;
829
830 err = f2fs_get_node_info(sbi, dn->nid, &ni);
831 if (err)
832 return err;
833
834 /* Deallocate node address */
835 f2fs_invalidate_blocks(sbi, ni.blk_addr);
836 dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
837 set_node_addr(sbi, &ni, NULL_ADDR, false);
838
839 if (dn->nid == dn->inode->i_ino) {
840 f2fs_remove_orphan_inode(sbi, dn->nid);
841 dec_valid_inode_count(sbi);
842 f2fs_inode_synced(dn->inode);
843 }
844
845 clear_node_page_dirty(dn->node_page);
846 set_sbi_flag(sbi, SBI_IS_DIRTY);
847
/* save the index before dropping the last reference to the page */
index = dn->node_page->index;
848 f2fs_put_page(dn->node_page, 1);
849
850 invalidate_mapping_pages(NODE_MAPPING(sbi),
851 index, index);
852
853 dn->node_page = NULL;
854 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
855
856 return 0;
857 }
858
859 static int truncate_dnode(struct dnode_of_data *dn)
860 {
861 struct page *page;
862 int err;
863
864 if (dn->nid == 0)
865 return 1;
866
867 /* get direct node */
868 page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
869 if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
870 return 1;
871 else if (IS_ERR(page))
872 return PTR_ERR(page);
873
874 /* Make dnode_of_data for parameter */
875 dn->node_page = page;
876 dn->ofs_in_node = 0;
877 f2fs_truncate_data_blocks(dn);
878 err = truncate_node(dn);
879 if (err)
880 return err;
881
882 return 1;
883 }
884
885 static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
886 int ofs, int depth)
887 {
888 struct dnode_of_data rdn = *dn;
889 struct page *page;
890 struct f2fs_node *rn;
891 nid_t child_nid;
892 unsigned int child_nofs;
893 int freed = 0;
894 int i, ret;
895
896 if (dn->nid == 0)
897 return NIDS_PER_BLOCK + 1;
898
899 trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);
900
901 page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
902 if (IS_ERR(page)) {
903 trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
904 return PTR_ERR(page);
905 }
906
907 f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);
908
909 rn = F2FS_NODE(page);
910 if (depth < 3) {
911 for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
912 child_nid = le32_to_cpu(rn->in.nid[i]);
913 if (child_nid == 0)
914 continue;
915 rdn.nid = child_nid;
916 ret = truncate_dnode(&rdn);
917 if (ret < 0)
918 goto out_err;
919 if (set_nid(page, i, 0, false))
920 dn->node_changed = true;
921 }
922 } else {
923 child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
924 for (i = ofs; i < NIDS_PER_BLOCK; i++) {
925 child_nid = le32_to_cpu(rn->in.nid[i]);
926 if (child_nid == 0) {
927 child_nofs += NIDS_PER_BLOCK + 1;
928 continue;
929 }
930 rdn.nid = child_nid;
931 ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
932 if (ret == (NIDS_PER_BLOCK + 1)) {
933 if (set_nid(page, i, 0, false))
934 dn->node_changed = true;
935 child_nofs += ret;
936 } else if (ret < 0 && ret != -ENOENT) {
937 goto out_err;
938 }
939 }
940 freed = child_nofs;
941 }
942
943 if (!ofs) {
944 /* remove current indirect node */
945 dn->node_page = page;
946 ret = truncate_node(dn);
947 if (ret)
948 goto out_err;
949 freed++;
950 } else {
951 f2fs_put_page(page, 1);
952 }
953 trace_f2fs_truncate_nodes_exit(dn->inode, freed);
954 return freed;
955
956 out_err:
957 f2fs_put_page(page, 1);
958 trace_f2fs_truncate_nodes_exit(dn->inode, ret);
959 return ret;
960 }
961
962 static int truncate_partial_nodes(struct dnode_of_data *dn,
963 struct f2fs_inode *ri, int *offset, int depth)
964 {
965 struct page *pages[2];
966 nid_t nid[3];
967 nid_t child_nid;
968 int err = 0;
969 int i;
970 int idx = depth - 2;
971
972 nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
973 if (!nid[0])
974 return 0;
975
976 /* get indirect nodes in the path */
977 for (i = 0; i < idx + 1; i++) {
978 /* the page reference count will be increased */
979 pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
980 if (IS_ERR(pages[i])) {
981 err = PTR_ERR(pages[i]);
982 idx = i - 1;
983 goto fail;
984 }
985 nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
986 }
987
988 f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);
989
990 /* free direct nodes linked to a partial indirect node */
991 for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
992 child_nid = get_nid(pages[idx], i, false);
993 if (!child_nid)
994 continue;
995 dn->nid = child_nid;
996 err = truncate_dnode(dn);
997 if (err < 0)
998 goto fail;
999 if (set_nid(pages[idx], i, 0, false))
1000 dn->node_changed = true;
1001 }
1002
1003 if (offset[idx + 1] == 0) {
1004 dn->node_page = pages[idx];
1005 dn->nid = nid[idx];
1006 err = truncate_node(dn);
1007 if (err)
1008 goto fail;
1009 } else {
1010 f2fs_put_page(pages[idx], 1);
1011 }
1012 offset[idx]++;
1013 offset[idx + 1] = 0;
1014 idx--;
1015 fail:
1016 for (i = idx; i >= 0; i--)
1017 f2fs_put_page(pages[i], 1);
1018
1019 trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
1020
1021 return err;
1022 }
1023
1024 /*
1025 * All the block addresses of data and nodes should be nullified.
1026 */
1027 int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
1028 {
1029 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1030 int err = 0, cont = 1;
1031 int level, offset[4], noffset[4];
1032 unsigned int nofs = 0;
1033 struct f2fs_inode *ri;
1034 struct dnode_of_data dn;
1035 struct page *page;
1036
1037 trace_f2fs_truncate_inode_blocks_enter(inode, from);
1038
1039 level = get_node_path(inode, from, offset, noffset);
1040 if (level < 0)
1041 return level;
1042
1043 page = f2fs_get_node_page(sbi, inode->i_ino);
1044 if (IS_ERR(page)) {
1045 trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
1046 return PTR_ERR(page);
1047 }
1048
1049 set_new_dnode(&dn, inode, page, NULL, 0);
1050 unlock_page(page);
1051
1052 ri = F2FS_INODE(page);
1053 switch (level) {
1054 case 0:
1055 case 1:
1056 nofs = noffset[1];
1057 break;
1058 case 2:
1059 nofs = noffset[1];
1060 if (!offset[level - 1])
1061 goto skip_partial;
1062 err = truncate_partial_nodes(&dn, ri, offset, level);
1063 if (err < 0 && err != -ENOENT)
1064 goto fail;
1065 nofs += 1 + NIDS_PER_BLOCK;
1066 break;
1067 case 3:
1068 nofs = 5 + 2 * NIDS_PER_BLOCK;
1069 if (!offset[level - 1])
1070 goto skip_partial;
1071 err = truncate_partial_nodes(&dn, ri, offset, level);
1072 if (err < 0 && err != -ENOENT)
1073 goto fail;
1074 break;
1075 default:
1076 BUG();
1077 }
1078
1079 skip_partial:
1080 while (cont) {
1081 dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
1082 switch (offset[0]) {
1083 case NODE_DIR1_BLOCK:
1084 case NODE_DIR2_BLOCK:
1085 err = truncate_dnode(&dn);
1086 break;
1087
1088 case NODE_IND1_BLOCK:
1089 case NODE_IND2_BLOCK:
1090 err = truncate_nodes(&dn, nofs, offset[1], 2);
1091 break;
1092
1093 case NODE_DIND_BLOCK:
1094 err = truncate_nodes(&dn, nofs, offset[1], 3);
1095 cont = 0;
1096 break;
1097
1098 default:
1099 BUG();
1100 }
1101 if (err < 0 && err != -ENOENT)
1102 goto fail;
1103 if (offset[1] == 0 &&
1104 ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
1105 lock_page(page);
1106 BUG_ON(page->mapping != NODE_MAPPING(sbi));
1107 f2fs_wait_on_page_writeback(page, NODE, true);
1108 ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
1109 set_page_dirty(page);
1110 unlock_page(page);
1111 }
1112 offset[1] = 0;
1113 offset[0]++;
1114 nofs += err;
1115 }
1116 fail:
1117 f2fs_put_page(page, 0);
1118 trace_f2fs_truncate_inode_blocks_exit(inode, err);
1119 return err > 0 ? 0 : err;
1120 }
1121
1122 /* caller must lock inode page */
1123 int f2fs_truncate_xattr_node(struct inode *inode)
1124 {
1125 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1126 nid_t nid = F2FS_I(inode)->i_xattr_nid;
1127 struct dnode_of_data dn;
1128 struct page *npage;
1129 int err;
1130
1131 if (!nid)
1132 return 0;
1133
1134 npage = f2fs_get_node_page(sbi, nid);
1135 if (IS_ERR(npage))
1136 return PTR_ERR(npage);
1137
1138 set_new_dnode(&dn, inode, NULL, npage, nid);
1139 err = truncate_node(&dn);
1140 if (err) {
1141 f2fs_put_page(npage, 1);
1142 return err;
1143 }
1144
1145 f2fs_i_xnid_write(inode, 0);
1146
1147 return 0;
1148 }
1149
1150 /*
1151 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
1152 * f2fs_unlock_op().
1153 */
1154 int f2fs_remove_inode_page(struct inode *inode)
1155 {
1156 struct dnode_of_data dn;
1157 int err;
1158
1159 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1160 err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
1161 if (err)
1162 return err;
1163
1164 err = f2fs_truncate_xattr_node(inode);
1165 if (err) {
1166 f2fs_put_dnode(&dn);
1167 return err;
1168 }
1169
1170 /* remove potential inline_data blocks */
1171 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
1172 S_ISLNK(inode->i_mode))
1173 f2fs_truncate_data_blocks_range(&dn, 1);
1174
1175 /* 0 is possible, after f2fs_new_inode() has failed */
1176 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
1177 f2fs_put_dnode(&dn);
1178 return -EIO;
1179 }
1180 f2fs_bug_on(F2FS_I_SB(inode),
1181 inode->i_blocks != 0 && inode->i_blocks != 8);
1182
1183 /* will put inode & node pages */
1184 err = truncate_node(&dn);
1185 if (err) {
1186 f2fs_put_dnode(&dn);
1187 return err;
1188 }
1189 return 0;
1190 }
1191
1192 struct page *f2fs_new_inode_page(struct inode *inode)
1193 {
1194 struct dnode_of_data dn;
1195
1196 /* allocate inode page for new inode */
1197 set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
1198
1199 /* caller should f2fs_put_page(page, 1); */
1200 return f2fs_new_node_page(&dn, 0);
1201 }
1202
1203 struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
1204 {
1205 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1206 struct node_info new_ni;
1207 struct page *page;
1208 int err;
1209
1210 if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
1211 return ERR_PTR(-EPERM);
1212
1213 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
1214 if (!page)
1215 return ERR_PTR(-ENOMEM);
1216
1217 if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
1218 goto fail;
1219
1220 #ifdef CONFIG_F2FS_CHECK_FS
1221 err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
1222 if (err) {
1223 dec_valid_node_count(sbi, dn->inode, !ofs);
1224 goto fail;
1225 }
1226 f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
1227 #endif
1228 new_ni.nid = dn->nid;
1229 new_ni.ino = dn->inode->i_ino;
1230 new_ni.blk_addr = NULL_ADDR;
1231 new_ni.flag = 0;
1232 new_ni.version = 0;
1233 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
1234
1235 f2fs_wait_on_page_writeback(page, NODE, true);
1236 fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
1237 set_cold_node(page, S_ISDIR(dn->inode->i_mode));
1238 if (!PageUptodate(page))
1239 SetPageUptodate(page);
1240 if (set_page_dirty(page))
1241 dn->node_changed = true;
1242
1243 if (f2fs_has_xattr_block(ofs))
1244 f2fs_i_xnid_write(dn->inode, dn->nid);
1245
1246 if (ofs == 0)
1247 inc_valid_inode_count(sbi);
1248 return page;
1249
1250 fail:
1251 clear_node_page_dirty(page);
1252 f2fs_put_page(page, 1);
1253 return ERR_PTR(err);
1254 }
1255
1256 /*
1257 * The return value tells the caller how to drop the page:
1258 * 0: f2fs_put_page(page, 0)
1259 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
1260 */
1261 static int read_node_page(struct page *page, int op_flags)
1262 {
1263 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1264 struct node_info ni;
1265 struct f2fs_io_info fio = {
1266 .sbi = sbi,
1267 .type = NODE,
1268 .op = REQ_OP_READ,
1269 .op_flags = op_flags,
1270 .page = page,
1271 .encrypted_page = NULL,
1272 };
1273 int err;
1274
1275 if (PageUptodate(page)) {
1276 #ifdef CONFIG_F2FS_CHECK_FS
1277 f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
1278 #endif
1279 return LOCKED_PAGE;
1280 }
1281
1282 err = f2fs_get_node_info(sbi, page->index, &ni);
1283 if (err)
1284 return err;
1285
1286 if (unlikely(ni.blk_addr == NULL_ADDR) ||
1287 is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
1288 ClearPageUptodate(page);
1289 return -ENOENT;
1290 }
1291
1292 fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
1293 return f2fs_submit_page_bio(&fio);
1294 }
1295
1296 /*
1297 * Readahead a node page
1298 */
1299 void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
1300 {
1301 struct page *apage;
1302 int err;
1303
1304 if (!nid)
1305 return;
1306 if (f2fs_check_nid_range(sbi, nid))
1307 return;
1308
1309 rcu_read_lock();
1310 apage = radix_tree_lookup(&NODE_MAPPING(sbi)->i_pages, nid);
1311 rcu_read_unlock();
1312 if (apage)
1313 return;
1314
1315 apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1316 if (!apage)
1317 return;
1318
1319 err = read_node_page(apage, REQ_RAHEAD);
1320 f2fs_put_page(apage, err ? 1 : 0);
1321 }
1322
1323 static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
1324 struct page *parent, int start)
1325 {
1326 struct page *page;
1327 int err;
1328
1329 if (!nid)
1330 return ERR_PTR(-ENOENT);
1331 if (f2fs_check_nid_range(sbi, nid))
1332 return ERR_PTR(-EINVAL);
1333 repeat:
1334 page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
1335 if (!page)
1336 return ERR_PTR(-ENOMEM);
1337
1338 err = read_node_page(page, 0);
1339 if (err < 0) {
1340 f2fs_put_page(page, 1);
1341 return ERR_PTR(err);
1342 } else if (err == LOCKED_PAGE) {
1343 err = 0;
1344 goto page_hit;
1345 }
1346
1347 if (parent)
1348 f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);
1349
1350 lock_page(page);
1351
1352 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1353 f2fs_put_page(page, 1);
1354 goto repeat;
1355 }
1356
1357 if (unlikely(!PageUptodate(page))) {
1358 err = -EIO;
1359 goto out_err;
1360 }
1361
1362 if (!f2fs_inode_chksum_verify(sbi, page)) {
1363 err = -EBADMSG;
1364 goto out_err;
1365 }
1366 page_hit:
1367 if (unlikely(nid != nid_of_node(page))) {
1368 f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
1369 "nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1370 nid, nid_of_node(page), ino_of_node(page),
1371 ofs_of_node(page), cpver_of_node(page),
1372 next_blkaddr_of_node(page));
1373 err = -EINVAL;
1374 out_err:
1375 ClearPageUptodate(page);
1376 f2fs_put_page(page, 1);
1377 return ERR_PTR(err);
1378 }
1379 return page;
1380 }
1381
1382 struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
1383 {
1384 return __get_node_page(sbi, nid, NULL, 0);
1385 }
1386
1387 struct page *f2fs_get_node_page_ra(struct page *parent, int start)
1388 {
1389 struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
1390 nid_t nid = get_nid(parent, start, false);
1391
1392 return __get_node_page(sbi, nid, parent, start);
1393 }
1394
1395 static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
1396 {
1397 struct inode *inode;
1398 struct page *page;
1399 int ret;
1400
1401 /* should flush inline_data before evict_inode */
1402 inode = ilookup(sbi->sb, ino);
1403 if (!inode)
1404 return;
1405
1406 page = f2fs_pagecache_get_page(inode->i_mapping, 0,
1407 FGP_LOCK|FGP_NOWAIT, 0);
1408 if (!page)
1409 goto iput_out;
1410
1411 if (!PageUptodate(page))
1412 goto page_out;
1413
1414 if (!PageDirty(page))
1415 goto page_out;
1416
1417 if (!clear_page_dirty_for_io(page))
1418 goto page_out;
1419
1420 ret = f2fs_write_inline_data(inode, page);
1421 inode_dec_dirty_pages(inode);
1422 f2fs_remove_dirty_inode(inode);
1423 if (ret)
1424 set_page_dirty(page);
1425 page_out:
1426 f2fs_put_page(page, 1);
1427 iput_out:
1428 iput(inode);
1429 }
1430
1431 static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
1432 {
1433 pgoff_t index;
1434 struct pagevec pvec;
1435 struct page *last_page = NULL;
1436 int nr_pages;
1437
1438 pagevec_init(&pvec);
1439 index = 0;
1440
1441 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1442 PAGECACHE_TAG_DIRTY))) {
1443 int i;
1444
1445 for (i = 0; i < nr_pages; i++) {
1446 struct page *page = pvec.pages[i];
1447
1448 if (unlikely(f2fs_cp_error(sbi))) {
1449 f2fs_put_page(last_page, 0);
1450 pagevec_release(&pvec);
1451 return ERR_PTR(-EIO);
1452 }
1453
1454 if (!IS_DNODE(page) || !is_cold_node(page))
1455 continue;
1456 if (ino_of_node(page) != ino)
1457 continue;
1458
1459 lock_page(page);
1460
1461 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1462 continue_unlock:
1463 unlock_page(page);
1464 continue;
1465 }
1466 if (ino_of_node(page) != ino)
1467 goto continue_unlock;
1468
1469 if (!PageDirty(page)) {
1470 /* someone wrote it for us */
1471 goto continue_unlock;
1472 }
1473
1474 if (last_page)
1475 f2fs_put_page(last_page, 0);
1476
1477 get_page(page);
1478 last_page = page;
1479 unlock_page(page);
1480 }
1481 pagevec_release(&pvec);
1482 cond_resched();
1483 }
1484 return last_page;
1485 }
1486
1487 static int __write_node_page(struct page *page, bool atomic, bool *submitted,
1488 struct writeback_control *wbc, bool do_balance,
1489 enum iostat_type io_type, unsigned int *seq_id)
1490 {
1491 struct f2fs_sb_info *sbi = F2FS_P_SB(page);
1492 nid_t nid;
1493 struct node_info ni;
1494 struct f2fs_io_info fio = {
1495 .sbi = sbi,
1496 .ino = ino_of_node(page),
1497 .type = NODE,
1498 .op = REQ_OP_WRITE,
1499 .op_flags = wbc_to_write_flags(wbc),
1500 .page = page,
1501 .encrypted_page = NULL,
1502 .submitted = false,
1503 .io_type = io_type,
1504 .io_wbc = wbc,
1505 };
1506 unsigned int seq;
1507
1508 trace_f2fs_writepage(page, NODE);
1509
1510 if (unlikely(f2fs_cp_error(sbi)))
1511 goto redirty_out;
1512
1513 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1514 goto redirty_out;
1515
1516 if (wbc->sync_mode == WB_SYNC_NONE &&
1517 IS_DNODE(page) && is_cold_node(page))
1518 goto redirty_out;
1519
1520 /* get old block addr of this node page */
1521 nid = nid_of_node(page);
1522 f2fs_bug_on(sbi, page->index != nid);
1523
1524 if (f2fs_get_node_info(sbi, nid, &ni))
1525 goto redirty_out;
1526
1527 if (wbc->for_reclaim) {
1528 if (!down_read_trylock(&sbi->node_write))
1529 goto redirty_out;
1530 } else {
1531 down_read(&sbi->node_write);
1532 }
1533
1534 /* This page is already truncated */
1535 if (unlikely(ni.blk_addr == NULL_ADDR)) {
1536 ClearPageUptodate(page);
1537 dec_page_count(sbi, F2FS_DIRTY_NODES);
1538 up_read(&sbi->node_write);
1539 unlock_page(page);
1540 return 0;
1541 }
1542
1543 if (__is_valid_data_blkaddr(ni.blk_addr) &&
1544 !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC)) {
1545 up_read(&sbi->node_write);
1546 goto redirty_out;
1547 }
1548
1549 if (atomic && !test_opt(sbi, NOBARRIER))
1550 fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
1551
1552 set_page_writeback(page);
1553 ClearPageError(page);
1554
1555 if (f2fs_in_warm_node_list(sbi, page)) {
1556 seq = f2fs_add_fsync_node_entry(sbi, page);
1557 if (seq_id)
1558 *seq_id = seq;
1559 }
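/*
 * Note: the seq_id recorded above is what f2fs_wait_on_node_pages_writeback()
 * compares against, so fsync can wait for exactly the node pages queued so
 * far.
 */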
1560
1561 fio.old_blkaddr = ni.blk_addr;
1562 f2fs_do_write_node_page(nid, &fio);
1563 set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
1564 dec_page_count(sbi, F2FS_DIRTY_NODES);
1565 up_read(&sbi->node_write);
1566
1567 if (wbc->for_reclaim) {
1568 f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
1569 page->index, NODE);
1570 submitted = NULL;
1571 }
1572
1573 unlock_page(page);
1574
1575 if (unlikely(f2fs_cp_error(sbi))) {
1576 f2fs_submit_merged_write(sbi, NODE);
1577 submitted = NULL;
1578 }
1579 if (submitted)
1580 *submitted = fio.submitted;
1581
1582 if (do_balance)
1583 f2fs_balance_fs(sbi, false);
1584 return 0;
1585
1586 redirty_out:
1587 redirty_page_for_writepage(wbc, page);
1588 return AOP_WRITEPAGE_ACTIVATE;
1589 }
1590
1591 void f2fs_move_node_page(struct page *node_page, int gc_type)
1592 {
1593 if (gc_type == FG_GC) {
1594 struct writeback_control wbc = {
1595 .sync_mode = WB_SYNC_ALL,
1596 .nr_to_write = 1,
1597 .for_reclaim = 0,
1598 };
1599
1600 set_page_dirty(node_page);
1601 f2fs_wait_on_page_writeback(node_page, NODE, true);
1602
1603 f2fs_bug_on(F2FS_P_SB(node_page), PageWriteback(node_page));
1604 if (!clear_page_dirty_for_io(node_page))
1605 goto out_page;
1606
1607 if (__write_node_page(node_page, false, NULL,
1608 &wbc, false, FS_GC_NODE_IO, NULL))
1609 unlock_page(node_page);
1610 goto release_page;
1611 } else {
1612 /* just set the page dirty; writeback will flush it later */
1613 if (!PageWriteback(node_page))
1614 set_page_dirty(node_page);
1615 }
1616 out_page:
1617 unlock_page(node_page);
1618 release_page:
1619 f2fs_put_page(node_page, 0);
1620 }
1621
1622 static int f2fs_write_node_page(struct page *page,
1623 struct writeback_control *wbc)
1624 {
1625 return __write_node_page(page, false, NULL, wbc, false,
1626 FS_NODE_IO, NULL);
1627 }
1628
1629 int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
1630 struct writeback_control *wbc, bool atomic,
1631 unsigned int *seq_id)
1632 {
1633 pgoff_t index;
1634 pgoff_t last_idx = ULONG_MAX;
1635 struct pagevec pvec;
1636 int ret = 0;
1637 struct page *last_page = NULL;
1638 bool marked = false;
1639 nid_t ino = inode->i_ino;
1640 int nr_pages;
1641
1642 if (atomic) {
1643 last_page = last_fsync_dnode(sbi, ino);
1644 if (IS_ERR_OR_NULL(last_page))
1645 return PTR_ERR_OR_ZERO(last_page);
1646 }
1647 retry:
1648 pagevec_init(&pvec);
1649 index = 0;
1650
1651 while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
1652 PAGECACHE_TAG_DIRTY))) {
1653 int i;
1654
1655 for (i = 0; i < nr_pages; i++) {
1656 struct page *page = pvec.pages[i];
1657 bool submitted = false;
1658
1659 if (unlikely(f2fs_cp_error(sbi))) {
1660 f2fs_put_page(last_page, 0);
1661 pagevec_release(&pvec);
1662 ret = -EIO;
1663 goto out;
1664 }
1665
1666 if (!IS_DNODE(page) || !is_cold_node(page))
1667 continue;
1668 if (ino_of_node(page) != ino)
1669 continue;
1670
1671 lock_page(page);
1672
1673 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1674 continue_unlock:
1675 unlock_page(page);
1676 continue;
1677 }
1678 if (ino_of_node(page) != ino)
1679 goto continue_unlock;
1680
1681 if (!PageDirty(page) && page != last_page) {
1682 /* someone wrote it for us */
1683 goto continue_unlock;
1684 }
1685
1686 f2fs_wait_on_page_writeback(page, NODE, true);
1687 BUG_ON(PageWriteback(page));
1688
1689 set_fsync_mark(page, 0);
1690 set_dentry_mark(page, 0);
1691
1692 if (!atomic || page == last_page) {
1693 set_fsync_mark(page, 1);
1694 if (IS_INODE(page)) {
1695 if (is_inode_flag_set(inode,
1696 FI_DIRTY_INODE))
1697 f2fs_update_inode(inode, page);
1698 set_dentry_mark(page,
1699 f2fs_need_dentry_mark(sbi, ino));
1700 }
1701 /* may be written by other thread */
1702 if (!PageDirty(page))
1703 set_page_dirty(page);
1704 }
1705
1706 if (!clear_page_dirty_for_io(page))
1707 goto continue_unlock;
1708
1709 ret = __write_node_page(page, atomic &&
1710 page == last_page,
1711 &submitted, wbc, true,
1712 FS_NODE_IO, seq_id);
1713 if (ret) {
1714 unlock_page(page);
1715 f2fs_put_page(last_page, 0);
1716 break;
1717 } else if (submitted) {
1718 last_idx = page->index;
1719 }
1720
1721 if (page == last_page) {
1722 f2fs_put_page(page, 0);
1723 marked = true;
1724 break;
1725 }
1726 }
1727 pagevec_release(&pvec);
1728 cond_resched();
1729
1730 if (ret || marked)
1731 break;
1732 }
1733 if (!ret && atomic && !marked) {
1734 f2fs_msg(sbi->sb, KERN_DEBUG,
1735 "Retry to write fsync mark: ino=%u, idx=%lx",
1736 ino, last_page->index);
1737 lock_page(last_page);
1738 f2fs_wait_on_page_writeback(last_page, NODE, true);
1739 set_page_dirty(last_page);
1740 unlock_page(last_page);
1741 goto retry;
1742 }
1743 out:
1744 if (last_idx != ULONG_MAX)
1745 f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
1746 return ret ? -EIO : 0;
1747 }
1748
1749 int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
1750 struct writeback_control *wbc,
1751 bool do_balance, enum iostat_type io_type)
1752 {
1753 pgoff_t index;
1754 struct pagevec pvec;
1755 int step = 0;
1756 int nwritten = 0;
1757 int ret = 0;
1758 int nr_pages, done = 0;
1759
1760 pagevec_init(&pvec);
1761
1762 next_step:
1763 index = 0;
1764
1765 while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
1766 NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
1767 int i;
1768
1769 for (i = 0; i < nr_pages; i++) {
1770 struct page *page = pvec.pages[i];
1771 bool submitted = false;
1772
1773 /* give a priority to WB_SYNC threads */
1774 if (atomic_read(&sbi->wb_sync_req[NODE]) &&
1775 wbc->sync_mode == WB_SYNC_NONE) {
1776 done = 1;
1777 break;
1778 }
1779
1780 /*
1781 * flushing sequence with step:
1782 * 0. indirect nodes
1783 * 1. dentry dnodes
1784 * 2. file dnodes
1785 */
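/*
 * Note: the step checks below classify pages via the node footer:
 * !IS_DNODE() -> indirect node, IS_DNODE() && !is_cold_node() -> dentry
 * dnode, IS_DNODE() && is_cold_node() -> file dnode.
 */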
1786 if (step == 0 && IS_DNODE(page))
1787 continue;
1788 if (step == 1 && (!IS_DNODE(page) ||
1789 is_cold_node(page)))
1790 continue;
1791 if (step == 2 && (!IS_DNODE(page) ||
1792 !is_cold_node(page)))
1793 continue;
1794 lock_node:
1795 if (wbc->sync_mode == WB_SYNC_ALL)
1796 lock_page(page);
1797 else if (!trylock_page(page))
1798 continue;
1799
1800 if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
1801 continue_unlock:
1802 unlock_page(page);
1803 continue;
1804 }
1805
1806 if (!PageDirty(page)) {
1807 /* someone wrote it for us */
1808 goto continue_unlock;
1809 }
1810
1811 /* flush inline_data */
1812 if (is_inline_node(page)) {
1813 clear_inline_node(page);
1814 unlock_page(page);
1815 flush_inline_data(sbi, ino_of_node(page));
1816 goto lock_node;
1817 }
1818
1819 f2fs_wait_on_page_writeback(page, NODE, true);
1820
1821 BUG_ON(PageWriteback(page));
1822 if (!clear_page_dirty_for_io(page))
1823 goto continue_unlock;
1824
1825 set_fsync_mark(page, 0);
1826 set_dentry_mark(page, 0);
1827
1828 ret = __write_node_page(page, false, &submitted,
1829 wbc, do_balance, io_type, NULL);
1830 if (ret)
1831 unlock_page(page);
1832 else if (submitted)
1833 nwritten++;
1834
1835 if (--wbc->nr_to_write == 0)
1836 break;
1837 }
1838 pagevec_release(&pvec);
1839 cond_resched();
1840
1841 if (wbc->nr_to_write == 0) {
1842 step = 2;
1843 break;
1844 }
1845 }
1846
1847 if (step < 2) {
1848 if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
1849 goto out;
1850 step++;
1851 goto next_step;
1852 }
1853 out:
1854 if (nwritten)
1855 f2fs_submit_merged_write(sbi, NODE);
1856
1857 if (unlikely(f2fs_cp_error(sbi)))
1858 return -EIO;
1859 return ret;
1860 }
1861
1862 int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
1863 unsigned int seq_id)
1864 {
1865 struct fsync_node_entry *fn;
1866 struct page *page;
1867 struct list_head *head = &sbi->fsync_node_list;
1868 unsigned long flags;
1869 unsigned int cur_seq_id = 0;
1870 int ret2, ret = 0;
1871
1872 while (seq_id && cur_seq_id < seq_id) {
1873 spin_lock_irqsave(&sbi->fsync_node_lock, flags);
1874 if (list_empty(head)) {
1875 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
1876 break;
1877 }
1878 fn = list_first_entry(head, struct fsync_node_entry, list);
1879 if (fn->seq_id > seq_id) {
1880 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
1881 break;
1882 }
1883 cur_seq_id = fn->seq_id;
1884 page = fn->page;
1885 get_page(page);
1886 spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
1887
1888 f2fs_wait_on_page_writeback(page, NODE, true);
1889 if (TestClearPageError(page))
1890 ret = -EIO;
1891
1892 put_page(page);
1893
1894 if (ret)
1895 break;
1896 }
1897
1898 ret2 = filemap_check_errors(NODE_MAPPING(sbi));
1899 if (!ret)
1900 ret = ret2;
1901
1902 return ret;
1903 }
1904
1905 static int f2fs_write_node_pages(struct address_space *mapping,
1906 struct writeback_control *wbc)
1907 {
1908 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
1909 struct blk_plug plug;
1910 long diff;
1911
1912 if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
1913 goto skip_write;
1914
1915 /* balancing f2fs's metadata in background */
1916 f2fs_balance_fs_bg(sbi);
1917
1918 /* collect a number of dirty node pages and write together */
1919 if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
1920 goto skip_write;
1921
1922 if (wbc->sync_mode == WB_SYNC_ALL)
1923 atomic_inc(&sbi->wb_sync_req[NODE]);
1924 else if (atomic_read(&sbi->wb_sync_req[NODE]))
1925 goto skip_write;
1926
1927 trace_f2fs_writepages(mapping->host, wbc, NODE);
1928
1929 diff = nr_pages_to_write(sbi, NODE, wbc);
1930 blk_start_plug(&plug);
1931 f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
1932 blk_finish_plug(&plug);
1933 wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
1934
1935 if (wbc->sync_mode == WB_SYNC_ALL)
1936 atomic_dec(&sbi->wb_sync_req[NODE]);
1937 return 0;
1938
1939 skip_write:
1940 wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
1941 trace_f2fs_writepages(mapping->host, wbc, NODE);
1942 return 0;
1943 }
1944
1945 static int f2fs_set_node_page_dirty(struct page *page)
1946 {
1947 trace_f2fs_set_page_dirty(page, NODE);
1948
1949 if (!PageUptodate(page))
1950 SetPageUptodate(page);
1951 #ifdef CONFIG_F2FS_CHECK_FS
1952 if (IS_INODE(page))
1953 f2fs_inode_chksum_set(F2FS_P_SB(page), page);
1954 #endif
1955 if (!PageDirty(page)) {
1956 __set_page_dirty_nobuffers(page);
1957 inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
1958 SetPagePrivate(page);
1959 f2fs_trace_pid(page);
1960 return 1;
1961 }
1962 return 0;
1963 }
1964
1965 /*
1966 * Structure of the f2fs node operations
1967 */
1968 const struct address_space_operations f2fs_node_aops = {
1969 .writepage = f2fs_write_node_page,
1970 .writepages = f2fs_write_node_pages,
1971 .set_page_dirty = f2fs_set_node_page_dirty,
1972 .invalidatepage = f2fs_invalidate_page,
1973 .releasepage = f2fs_release_page,
1974 #ifdef CONFIG_MIGRATION
1975 .migratepage = f2fs_migrate_page,
1976 #endif
1977 };
1978
1979 static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
1980 nid_t n)
1981 {
1982 return radix_tree_lookup(&nm_i->free_nid_root, n);
1983 }
1984
1985 static int __insert_free_nid(struct f2fs_sb_info *sbi,
1986 struct free_nid *i, enum nid_state state)
1987 {
1988 struct f2fs_nm_info *nm_i = NM_I(sbi);
1989
1990 int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
1991 if (err)
1992 return err;
1993
1994 f2fs_bug_on(sbi, state != i->state);
1995 nm_i->nid_cnt[state]++;
1996 if (state == FREE_NID)
1997 list_add_tail(&i->list, &nm_i->free_nid_list);
1998 return 0;
1999 }
2000
2001 static void __remove_free_nid(struct f2fs_sb_info *sbi,
2002 struct free_nid *i, enum nid_state state)
2003 {
2004 struct f2fs_nm_info *nm_i = NM_I(sbi);
2005
2006 f2fs_bug_on(sbi, state != i->state);
2007 nm_i->nid_cnt[state]--;
2008 if (state == FREE_NID)
2009 list_del(&i->list);
2010 radix_tree_delete(&nm_i->free_nid_root, i->nid);
2011 }
2012
2013 static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
2014 enum nid_state org_state, enum nid_state dst_state)
2015 {
2016 struct f2fs_nm_info *nm_i = NM_I(sbi);
2017
2018 f2fs_bug_on(sbi, org_state != i->state);
2019 i->state = dst_state;
2020 nm_i->nid_cnt[org_state]--;
2021 nm_i->nid_cnt[dst_state]++;
2022
2023 switch (dst_state) {
2024 case PREALLOC_NID:
2025 list_del(&i->list);
2026 break;
2027 case FREE_NID:
2028 list_add_tail(&i->list, &nm_i->free_nid_list);
2029 break;
2030 default:
2031 BUG_ON(1);
2032 }
2033 }
2034
2035 static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
2036 bool set, bool build)
2037 {
2038 struct f2fs_nm_info *nm_i = NM_I(sbi);
2039 unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
2040 unsigned int nid_ofs = nid - START_NID(nid);
2041
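/*
 * Worked example (assuming NAT_ENTRY_PER_BLOCK == 455, i.e.
 * 4096 / sizeof(struct f2fs_nat_entry)): nid 1000 gives
 * nat_ofs = 1000 / 455 = 2 and nid_ofs = 1000 - 910 = 90, i.e. bit 90
 * in the bitmap of the third NAT block.
 */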
2042 if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
2043 return;
2044
2045 if (set) {
2046 if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2047 return;
2048 __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2049 nm_i->free_nid_count[nat_ofs]++;
2050 } else {
2051 if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
2052 return;
2053 __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
2054 if (!build)
2055 nm_i->free_nid_count[nat_ofs]--;
2056 }
2057 }
2058
2059 /* return whether the nid is recognized as free */
2060 static bool add_free_nid(struct f2fs_sb_info *sbi,
2061 nid_t nid, bool build, bool update)
2062 {
2063 struct f2fs_nm_info *nm_i = NM_I(sbi);
2064 struct free_nid *i, *e;
2065 struct nat_entry *ne;
2066 int err = -EINVAL;
2067 bool ret = false;
2068
2069 /* 0 nid should not be used */
2070 if (unlikely(nid == 0))
2071 return false;
2072
2073 i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
2074 i->nid = nid;
2075 i->state = FREE_NID;
2076
2077 radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
2078
2079 spin_lock(&nm_i->nid_list_lock);
2080
2081 if (build) {
2082 /*
2083 * Thread A Thread B
2084 * - f2fs_create
2085 * - f2fs_new_inode
2086 * - f2fs_alloc_nid
2087 * - __insert_nid_to_list(PREALLOC_NID)
2088 * - f2fs_balance_fs_bg
2089 * - f2fs_build_free_nids
2090 * - __f2fs_build_free_nids
2091 * - scan_nat_page
2092 * - add_free_nid
2093 * - __lookup_nat_cache
2094 * - f2fs_add_link
2095 * - f2fs_init_inode_metadata
2096 * - f2fs_new_inode_page
2097 * - f2fs_new_node_page
2098 * - set_node_addr
2099 * - f2fs_alloc_nid_done
2100 * - __remove_nid_from_list(PREALLOC_NID)
2101 * - __insert_nid_to_list(FREE_NID)
2102 */
2103 ne = __lookup_nat_cache(nm_i, nid);
2104 if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
2105 nat_get_blkaddr(ne) != NULL_ADDR))
2106 goto err_out;
2107
2108 e = __lookup_free_nid_list(nm_i, nid);
2109 if (e) {
2110 if (e->state == FREE_NID)
2111 ret = true;
2112 goto err_out;
2113 }
2114 }
2115 ret = true;
2116 err = __insert_free_nid(sbi, i, FREE_NID);
2117 err_out:
2118 if (update) {
2119 update_free_nid_bitmap(sbi, nid, ret, build);
2120 if (!build)
2121 nm_i->available_nids++;
2122 }
2123 spin_unlock(&nm_i->nid_list_lock);
2124 radix_tree_preload_end();
2125
2126 if (err)
2127 kmem_cache_free(free_nid_slab, i);
2128 return ret;
2129 }
2130
2131 static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
2132 {
2133 struct f2fs_nm_info *nm_i = NM_I(sbi);
2134 struct free_nid *i;
2135 bool need_free = false;
2136
2137 spin_lock(&nm_i->nid_list_lock);
2138 i = __lookup_free_nid_list(nm_i, nid);
2139 if (i && i->state == FREE_NID) {
2140 __remove_free_nid(sbi, i, FREE_NID);
2141 need_free = true;
2142 }
2143 spin_unlock(&nm_i->nid_list_lock);
2144
2145 if (need_free)
2146 kmem_cache_free(free_nid_slab, i);
2147 }
2148
2149 static int scan_nat_page(struct f2fs_sb_info *sbi,
2150 struct page *nat_page, nid_t start_nid)
2151 {
2152 struct f2fs_nm_info *nm_i = NM_I(sbi);
2153 struct f2fs_nat_block *nat_blk = page_address(nat_page);
2154 block_t blk_addr;
2155 unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
2156 int i;
2157
2158 __set_bit_le(nat_ofs, nm_i->nat_block_bitmap);
2159
2160 i = start_nid % NAT_ENTRY_PER_BLOCK;
2161
2162 for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
2163 if (unlikely(start_nid >= nm_i->max_nid))
2164 break;
2165
2166 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
2167
2168 if (blk_addr == NEW_ADDR)
2169 return -EINVAL;
2170
2171 if (blk_addr == NULL_ADDR) {
2172 add_free_nid(sbi, start_nid, true, true);
2173 } else {
2174 spin_lock(&NM_I(sbi)->nid_list_lock);
2175 update_free_nid_bitmap(sbi, start_nid, false, true);
2176 spin_unlock(&NM_I(sbi)->nid_list_lock);
2177 }
2178 }
2179
2180 return 0;
2181 }
2182
2183 static void scan_curseg_cache(struct f2fs_sb_info *sbi)
2184 {
2185 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2186 struct f2fs_journal *journal = curseg->journal;
2187 int i;
2188
2189 down_read(&curseg->journal_rwsem);
2190 for (i = 0; i < nats_in_cursum(journal); i++) {
2191 block_t addr;
2192 nid_t nid;
2193
2194 addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
2195 nid = le32_to_cpu(nid_in_journal(journal, i));
2196 if (addr == NULL_ADDR)
2197 add_free_nid(sbi, nid, true, false);
2198 else
2199 remove_free_nid(sbi, nid);
2200 }
2201 up_read(&curseg->journal_rwsem);
2202 }
2203
2204 static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
2205 {
2206 struct f2fs_nm_info *nm_i = NM_I(sbi);
2207 unsigned int i, idx;
2208 nid_t nid;
2209
2210 down_read(&nm_i->nat_tree_lock);
2211
2212 for (i = 0; i < nm_i->nat_blocks; i++) {
2213 if (!test_bit_le(i, nm_i->nat_block_bitmap))
2214 continue;
2215 if (!nm_i->free_nid_count[i])
2216 continue;
2217 for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
2218 idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
2219 NAT_ENTRY_PER_BLOCK, idx);
2220 if (idx >= NAT_ENTRY_PER_BLOCK)
2221 break;
2222
2223 nid = i * NAT_ENTRY_PER_BLOCK + idx;
2224 add_free_nid(sbi, nid, true, false);
2225
2226 if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
2227 goto out;
2228 }
2229 }
2230 out:
2231 scan_curseg_cache(sbi);
2232
2233 up_read(&nm_i->nat_tree_lock);
2234 }
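
/*
 * Sketch (editorial): the inner loop above folds find_next_bit_le() into
 * the for-increment so each iteration lands on the next set bit. The same
 * idiom written out on its own (map, nbits and use() are placeholders,
 * not f2fs symbols):
 *
 *     for (idx = find_next_bit_le(map, nbits, 0);
 *          idx < nbits;
 *          idx = find_next_bit_le(map, nbits, idx + 1))
 *             use(idx);    // idx is the position of a set bit
 */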
2235
2236 static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2237 bool sync, bool mount)
2238 {
2239 struct f2fs_nm_info *nm_i = NM_I(sbi);
2240 int i = 0, ret;
2241 nid_t nid = nm_i->next_scan_nid;
2242
2243 if (unlikely(nid >= nm_i->max_nid))
2244 nid = 0;
2245
2246 /* Enough entries */
2247 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2248 return 0;
2249
2250 if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
2251 return 0;
2252
2253 if (!mount) {
2254 /* try to find free nids in free_nid_bitmap */
2255 scan_free_nid_bits(sbi);
2256
2257 if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
2258 return 0;
2259 }
2260
2261 /* readahead nat pages to be scanned */
2262 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
2263 META_NAT, true);
2264
2265 down_read(&nm_i->nat_tree_lock);
2266
2267 while (1) {
2268 if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
2269 nm_i->nat_block_bitmap)) {
2270 struct page *page = get_current_nat_page(sbi, nid);
2271
2272 if (IS_ERR(page)) {
2273 ret = PTR_ERR(page);
2274 } else {
2275 ret = scan_nat_page(sbi, page, nid);
2276 f2fs_put_page(page, 1);
2277 }
2278
2279 if (ret) {
2280 up_read(&nm_i->nat_tree_lock);
2281 f2fs_bug_on(sbi, !mount);
2282 f2fs_msg(sbi->sb, KERN_ERR,
2283 "NAT is corrupt, run fsck to fix it");
2284 return ret;
2285 }
2286 }
2287
2288 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
2289 if (unlikely(nid >= nm_i->max_nid))
2290 nid = 0;
2291
2292 if (++i >= FREE_NID_PAGES)
2293 break;
2294 }
2295
2296 /* remember where the next scan should resume to find more free nids */
2297 nm_i->next_scan_nid = nid;
2298
2299 /* find free nids from current sum_pages */
2300 scan_curseg_cache(sbi);
2301
2302 up_read(&nm_i->nat_tree_lock);
2303
2304 f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
2305 nm_i->ra_nid_pages, META_NAT, false);
2306
2307 return 0;
2308 }
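
/*
 * Scan budget (editorial), assuming the usual FREE_NID_PAGES = 8 and
 * NAT_ENTRY_PER_BLOCK = 455: the while loop above visits at most 8 NAT
 * blocks per invocation, i.e. up to 8 * 455 = 3640 nids, starting from
 * next_scan_nid and wrapping at max_nid. MAX_FREE_NIDS
 * (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES) caps the cached free list at
 * the same 3640 entries.
 */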
2309
2310 int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
2311 {
2312 int ret;
2313
2314 mutex_lock(&NM_I(sbi)->build_lock);
2315 ret = __f2fs_build_free_nids(sbi, sync, mount);
2316 mutex_unlock(&NM_I(sbi)->build_lock);
2317
2318 return ret;
2319 }
2320
2321 /*
2322 * If this function returns success, the caller can obtain a new nid
2323 * from the second parameter of this function.
2324 * The returned nid can be used as an ino as well as a nid when an inode is created.
2325 */
2326 bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
2327 {
2328 struct f2fs_nm_info *nm_i = NM_I(sbi);
2329 struct free_nid *i = NULL;
2330 retry:
2331 if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
2332 f2fs_show_injection_info(FAULT_ALLOC_NID);
2333 return false;
2334 }
2335
2336 spin_lock(&nm_i->nid_list_lock);
2337
2338 if (unlikely(nm_i->available_nids == 0)) {
2339 spin_unlock(&nm_i->nid_list_lock);
2340 return false;
2341 }
2342
2343 /* We should not use stale free nids created by f2fs_build_free_nids */
2344 if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
2345 f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
2346 i = list_first_entry(&nm_i->free_nid_list,
2347 struct free_nid, list);
2348 *nid = i->nid;
2349
2350 __move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
2351 nm_i->available_nids--;
2352
2353 update_free_nid_bitmap(sbi, *nid, false, false);
2354
2355 spin_unlock(&nm_i->nid_list_lock);
2356 return true;
2357 }
2358 spin_unlock(&nm_i->nid_list_lock);
2359
2360 /* Let's scan NAT pages and their caches to get free nids */
2361 if (!f2fs_build_free_nids(sbi, true, false))
2362 goto retry;
2363 return false;
2364 }
2365
2366 /*
2367 * f2fs_alloc_nid() should be called prior to this function.
2368 */
2369 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
2370 {
2371 struct f2fs_nm_info *nm_i = NM_I(sbi);
2372 struct free_nid *i;
2373
2374 spin_lock(&nm_i->nid_list_lock);
2375 i = __lookup_free_nid_list(nm_i, nid);
2376 f2fs_bug_on(sbi, !i);
2377 __remove_free_nid(sbi, i, PREALLOC_NID);
2378 spin_unlock(&nm_i->nid_list_lock);
2379
2380 kmem_cache_free(free_nid_slab, i);
2381 }
2382
2383 /*
2384 * f2fs_alloc_nid() should be called prior to this function.
2385 */
2386 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
2387 {
2388 struct f2fs_nm_info *nm_i = NM_I(sbi);
2389 struct free_nid *i;
2390 bool need_free = false;
2391
2392 if (!nid)
2393 return;
2394
2395 spin_lock(&nm_i->nid_list_lock);
2396 i = __lookup_free_nid_list(nm_i, nid);
2397 f2fs_bug_on(sbi, !i);
2398
2399 if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
2400 __remove_free_nid(sbi, i, PREALLOC_NID);
2401 need_free = true;
2402 } else {
2403 __move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
2404 }
2405
2406 nm_i->available_nids++;
2407
2408 update_free_nid_bitmap(sbi, nid, true, false);
2409
2410 spin_unlock(&nm_i->nid_list_lock);
2411
2412 if (need_free)
2413 kmem_cache_free(free_nid_slab, i);
2414 }
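
/*
 * Sketch (editorial) of the caller protocol for the three functions
 * above, loosely following f2fs_new_inode() in namei.c; error handling
 * is trimmed and insert_failed is a placeholder condition:
 *
 *     nid_t ino;
 *
 *     if (!f2fs_alloc_nid(sbi, &ino))        // reserve: FREE_NID -> PREALLOC_NID
 *             return ERR_PTR(-ENOSPC);
 *     ...
 *     if (insert_failed) {
 *             f2fs_alloc_nid_failed(sbi, ino);   // give the nid back
 *             return ERR_PTR(err);
 *     }
 *     f2fs_alloc_nid_done(sbi, ino);         // commit: drop tracking entry
 */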
2415
2416 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
2417 {
2418 struct f2fs_nm_info *nm_i = NM_I(sbi);
2419 struct free_nid *i, *next;
2420 int nr = nr_shrink;
2421
2422 if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2423 return 0;
2424
2425 if (!mutex_trylock(&nm_i->build_lock))
2426 return 0;
2427
2428 spin_lock(&nm_i->nid_list_lock);
2429 list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
2430 if (nr_shrink <= 0 ||
2431 nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
2432 break;
2433
2434 __remove_free_nid(sbi, i, FREE_NID);
2435 kmem_cache_free(free_nid_slab, i);
2436 nr_shrink--;
2437 }
2438 spin_unlock(&nm_i->nid_list_lock);
2439 mutex_unlock(&nm_i->build_lock);
2440
2441 return nr - nr_shrink;
2442 }
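
/*
 * Context (editorial): this is the memory-shrinker entry point; the f2fs
 * shrinker (f2fs_shrink_scan() in shrinker.c) calls it along the lines of
 *
 *     freed += f2fs_try_to_free_nids(sbi, nr - freed);
 *
 * so the mutex_trylock() on build_lock above deliberately backs off
 * (returning 0) rather than stalling reclaim while a free-nid scan is in
 * progress.
 */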
2443
2444 void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
2445 {
2446 void *src_addr, *dst_addr;
2447 size_t inline_size;
2448 struct page *ipage;
2449 struct f2fs_inode *ri;
2450
2451 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
2452 f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
2453
2454 ri = F2FS_INODE(page);
2455 if (ri->i_inline & F2FS_INLINE_XATTR) {
2456 set_inode_flag(inode, FI_INLINE_XATTR);
2457 } else {
2458 clear_inode_flag(inode, FI_INLINE_XATTR);
2459 goto update_inode;
2460 }
2461
2462 dst_addr = inline_xattr_addr(inode, ipage);
2463 src_addr = inline_xattr_addr(inode, page);
2464 inline_size = inline_xattr_size(inode);
2465
2466 f2fs_wait_on_page_writeback(ipage, NODE, true);
2467 memcpy(dst_addr, src_addr, inline_size);
2468 update_inode:
2469 f2fs_update_inode(inode, ipage);
2470 f2fs_put_page(ipage, 1);
2471 }
2472
2473 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
2474 {
2475 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
2476 nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
2477 nid_t new_xnid;
2478 struct dnode_of_data dn;
2479 struct node_info ni;
2480 struct page *xpage;
2481 int err;
2482
2483 if (!prev_xnid)
2484 goto recover_xnid;
2485
2486 /* 1: invalidate the previous xattr nid */
2487 err = f2fs_get_node_info(sbi, prev_xnid, &ni);
2488 if (err)
2489 return err;
2490
2491 f2fs_invalidate_blocks(sbi, ni.blk_addr);
2492 dec_valid_node_count(sbi, inode, false);
2493 set_node_addr(sbi, &ni, NULL_ADDR, false);
2494
2495 recover_xnid:
2496 /* 2: update xattr nid in inode */
2497 if (!f2fs_alloc_nid(sbi, &new_xnid))
2498 return -ENOSPC;
2499
2500 set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
2501 xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
2502 if (IS_ERR(xpage)) {
2503 f2fs_alloc_nid_failed(sbi, new_xnid);
2504 return PTR_ERR(xpage);
2505 }
2506
2507 f2fs_alloc_nid_done(sbi, new_xnid);
2508 f2fs_update_inode_page(inode);
2509
2510 /* 3: update and set xattr node page dirty */
2511 memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);
2512
2513 set_page_dirty(xpage);
2514 f2fs_put_page(xpage, 1);
2515
2516 return 0;
2517 }
2518
2519 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
2520 {
2521 struct f2fs_inode *src, *dst;
2522 nid_t ino = ino_of_node(page);
2523 struct node_info old_ni, new_ni;
2524 struct page *ipage;
2525 int err;
2526
2527 err = f2fs_get_node_info(sbi, ino, &old_ni);
2528 if (err)
2529 return err;
2530
2531 if (unlikely(old_ni.blk_addr != NULL_ADDR))
2532 return -EINVAL;
2533 retry:
2534 ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
2535 if (!ipage) {
2536 congestion_wait(BLK_RW_ASYNC, HZ/50);
2537 goto retry;
2538 }
2539
2540 /* this ino must not be reused, so drop it from the free nid list */
2541 remove_free_nid(sbi, ino);
2542
2543 if (!PageUptodate(ipage))
2544 SetPageUptodate(ipage);
2545 fill_node_footer(ipage, ino, ino, 0, true);
2546 set_cold_node(page, false);
2547
2548 src = F2FS_INODE(page);
2549 dst = F2FS_INODE(ipage);
2550
2551 memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
2552 dst->i_size = 0;
2553 dst->i_blocks = cpu_to_le64(1);
2554 dst->i_links = cpu_to_le32(1);
2555 dst->i_xattr_nid = 0;
2556 dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
2557 if (dst->i_inline & F2FS_EXTRA_ATTR) {
2558 dst->i_extra_isize = src->i_extra_isize;
2559
2560 if (f2fs_sb_has_flexible_inline_xattr(sbi->sb) &&
2561 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2562 i_inline_xattr_size))
2563 dst->i_inline_xattr_size = src->i_inline_xattr_size;
2564
2565 if (f2fs_sb_has_project_quota(sbi->sb) &&
2566 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2567 i_projid))
2568 dst->i_projid = src->i_projid;
2569
2570 if (f2fs_sb_has_inode_crtime(sbi->sb) &&
2571 F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
2572 i_crtime_nsec)) {
2573 dst->i_crtime = src->i_crtime;
2574 dst->i_crtime_nsec = src->i_crtime_nsec;
2575 }
2576 }
2577
2578 new_ni = old_ni;
2579 new_ni.ino = ino;
2580
2581 if (unlikely(inc_valid_node_count(sbi, NULL, true)))
2582 WARN_ON(1);
2583 set_node_addr(sbi, &new_ni, NEW_ADDR, false);
2584 inc_valid_inode_count(sbi);
2585 set_page_dirty(ipage);
2586 f2fs_put_page(ipage, 1);
2587 return 0;
2588 }
2589
2590 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
2591 unsigned int segno, struct f2fs_summary_block *sum)
2592 {
2593 struct f2fs_node *rn;
2594 struct f2fs_summary *sum_entry;
2595 block_t addr;
2596 int i, idx, last_offset, nrpages;
2597
2598 /* scan the node segment */
2599 last_offset = sbi->blocks_per_seg;
2600 addr = START_BLOCK(sbi, segno);
2601 sum_entry = &sum->entries[0];
2602
2603 for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
2604 nrpages = min(last_offset - i, BIO_MAX_PAGES);
2605
2606 /* readahead node pages */
2607 f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);
2608
2609 for (idx = addr; idx < addr + nrpages; idx++) {
2610 struct page *page = f2fs_get_tmp_page(sbi, idx);
2611
2612 if (IS_ERR(page))
2613 return PTR_ERR(page);
2614
2615 rn = F2FS_NODE(page);
2616 sum_entry->nid = rn->footer.nid;
2617 sum_entry->version = 0;
2618 sum_entry->ofs_in_node = 0;
2619 sum_entry++;
2620 f2fs_put_page(page, 1);
2621 }
2622
2623 invalidate_mapping_pages(META_MAPPING(sbi), addr,
2624 addr + nrpages);
2625 }
2626 return 0;
2627 }
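
/*
 * Batch sizing (editorial), assuming default 2 MiB segments
 * (blocks_per_seg = 512) and BIO_MAX_PAGES = 256 on this kernel: the
 * segment is read back in two readahead batches of 256 node pages each,
 * and each batch is dropped from the meta mapping once its summary
 * entries have been copied out.
 */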
2628
2629 static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
2630 {
2631 struct f2fs_nm_info *nm_i = NM_I(sbi);
2632 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2633 struct f2fs_journal *journal = curseg->journal;
2634 int i;
2635
2636 down_write(&curseg->journal_rwsem);
2637 for (i = 0; i < nats_in_cursum(journal); i++) {
2638 struct nat_entry *ne;
2639 struct f2fs_nat_entry raw_ne;
2640 nid_t nid = le32_to_cpu(nid_in_journal(journal, i));
2641
2642 raw_ne = nat_in_journal(journal, i);
2643
2644 ne = __lookup_nat_cache(nm_i, nid);
2645 if (!ne) {
2646 ne = __alloc_nat_entry(nid, true);
2647 __init_nat_entry(nm_i, ne, &raw_ne, true);
2648 }
2649
2650 /*
2651 * if a free nat in the journal has not been used since the last
2652 * checkpoint, we should remove it from the available nids, since
2653 * we will add it again later.
2654 */
2655 if (!get_nat_flag(ne, IS_DIRTY) &&
2656 le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
2657 spin_lock(&nm_i->nid_list_lock);
2658 nm_i->available_nids--;
2659 spin_unlock(&nm_i->nid_list_lock);
2660 }
2661
2662 __set_nat_cache_dirty(nm_i, ne);
2663 }
2664 update_nats_in_cursum(journal, -i);
2665 up_write(&curseg->journal_rwsem);
2666 }
2667
2668 static void __adjust_nat_entry_set(struct nat_entry_set *nes,
2669 struct list_head *head, int max)
2670 {
2671 struct nat_entry_set *cur;
2672
2673 if (nes->entry_cnt >= max)
2674 goto add_out;
2675
2676 list_for_each_entry(cur, head, set_list) {
2677 if (cur->entry_cnt >= nes->entry_cnt) {
2678 list_add(&nes->set_list, cur->set_list.prev);
2679 return;
2680 }
2681 }
2682 add_out:
2683 list_add_tail(&nes->set_list, head);
2684 }
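
/*
 * Worked example (editorial): the list is kept sorted by ascending
 * entry_cnt so that small sets are flushed to the journal first while
 * cursum space lasts; a set with entry_cnt >= max could never fit the
 * journal, so it is appended at the tail to be written straight to its
 * NAT page. E.g. inserting sets of sizes 7, 3, 9 with max = 8 yields
 * the list [3, 7, 9].
 */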
2685
2686 static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2687 struct page *page)
2688 {
2689 struct f2fs_nm_info *nm_i = NM_I(sbi);
2690 unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
2691 struct f2fs_nat_block *nat_blk = page_address(page);
2692 int valid = 0;
2693 int i = 0;
2694
2695 if (!enabled_nat_bits(sbi, NULL))
2696 return;
2697
2698 if (nat_index == 0) {
2699 valid = 1;
2700 i = 1;
2701 }
2702 for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2703 if (nat_blk->entries[i].block_addr != NULL_ADDR)
2704 valid++;
2705 }
2706 if (valid == 0) {
2707 __set_bit_le(nat_index, nm_i->empty_nat_bits);
2708 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2709 return;
2710 }
2711
2712 __clear_bit_le(nat_index, nm_i->empty_nat_bits);
2713 if (valid == NAT_ENTRY_PER_BLOCK)
2714 __set_bit_le(nat_index, nm_i->full_nat_bits);
2715 else
2716 __clear_bit_le(nat_index, nm_i->full_nat_bits);
2717 }
2718
2719 static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
2720 struct nat_entry_set *set, struct cp_control *cpc)
2721 {
2722 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2723 struct f2fs_journal *journal = curseg->journal;
2724 nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
2725 bool to_journal = true;
2726 struct f2fs_nat_block *nat_blk;
2727 struct nat_entry *ne, *cur;
2728 struct page *page = NULL;
2729
2730 /*
2731 * there are two paths to flush nat entries:
2732 * #1, flush nat entries to the journal in the current hot data summary block.
2733 * #2, flush nat entries to their nat page.
2734 */
2735 if (enabled_nat_bits(sbi, cpc) ||
2736 !__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
2737 to_journal = false;
2738
2739 if (to_journal) {
2740 down_write(&curseg->journal_rwsem);
2741 } else {
2742 page = get_next_nat_page(sbi, start_nid);
2743 if (IS_ERR(page))
2744 return PTR_ERR(page);
2745
2746 nat_blk = page_address(page);
2747 f2fs_bug_on(sbi, !nat_blk);
2748 }
2749
2750 /* flush dirty nats in nat entry set */
2751 list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
2752 struct f2fs_nat_entry *raw_ne;
2753 nid_t nid = nat_get_nid(ne);
2754 int offset;
2755
2756 f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);
2757
2758 if (to_journal) {
2759 offset = f2fs_lookup_journal_in_cursum(journal,
2760 NAT_JOURNAL, nid, 1);
2761 f2fs_bug_on(sbi, offset < 0);
2762 raw_ne = &nat_in_journal(journal, offset);
2763 nid_in_journal(journal, offset) = cpu_to_le32(nid);
2764 } else {
2765 raw_ne = &nat_blk->entries[nid - start_nid];
2766 }
2767 raw_nat_from_node_info(raw_ne, &ne->ni);
2768 nat_reset_flag(ne);
2769 __clear_nat_cache_dirty(NM_I(sbi), set, ne);
2770 if (nat_get_blkaddr(ne) == NULL_ADDR) {
2771 add_free_nid(sbi, nid, false, true);
2772 } else {
2773 spin_lock(&NM_I(sbi)->nid_list_lock);
2774 update_free_nid_bitmap(sbi, nid, false, false);
2775 spin_unlock(&NM_I(sbi)->nid_list_lock);
2776 }
2777 }
2778
2779 if (to_journal) {
2780 up_write(&curseg->journal_rwsem);
2781 } else {
2782 __update_nat_bits(sbi, start_nid, page);
2783 f2fs_put_page(page, 1);
2784 }
2785
2786 /* Allow dirty nats by node block allocation in write_begin */
2787 if (!set->entry_cnt) {
2788 radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
2789 kmem_cache_free(nat_entry_set_slab, set);
2790 }
2791 return 0;
2792 }
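
/*
 * Restatement (editorial) of the destination decision above:
 *
 *     to_journal = !enabled_nat_bits(sbi, cpc) &&
 *                  __has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL);
 *
 * i.e. dirty nats go to the hot-data summary journal only when nat_bits
 * is disabled and the journal still has room for the whole set;
 * otherwise the set is written to its on-disk NAT page, which lets
 * __update_nat_bits() see the complete block and refresh the full/empty
 * bits.
 */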
2793
2794 /*
2795 * This function is called during the checkpointing process.
2796 */
2797 int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
2798 {
2799 struct f2fs_nm_info *nm_i = NM_I(sbi);
2800 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
2801 struct f2fs_journal *journal = curseg->journal;
2802 struct nat_entry_set *setvec[SETVEC_SIZE];
2803 struct nat_entry_set *set, *tmp;
2804 unsigned int found;
2805 nid_t set_idx = 0;
2806 LIST_HEAD(sets);
2807 int err = 0;
2808
2809 /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
2810 if (enabled_nat_bits(sbi, cpc)) {
2811 down_write(&nm_i->nat_tree_lock);
2812 remove_nats_in_journal(sbi);
2813 up_write(&nm_i->nat_tree_lock);
2814 }
2815
2816 if (!nm_i->dirty_nat_cnt)
2817 return 0;
2818
2819 down_write(&nm_i->nat_tree_lock);
2820
2821 /*
2822 * if there is not enough space in the journal to store dirty nat
2823 * entries, remove all entries from the journal and merge them
2824 * into the nat entry sets.
2825 */
2826 if (enabled_nat_bits(sbi, cpc) ||
2827 !__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
2828 remove_nats_in_journal(sbi);
2829
2830 while ((found = __gang_lookup_nat_set(nm_i,
2831 set_idx, SETVEC_SIZE, setvec))) {
2832 unsigned idx;
2833 set_idx = setvec[found - 1]->set + 1;
2834 for (idx = 0; idx < found; idx++)
2835 __adjust_nat_entry_set(setvec[idx], &sets,
2836 MAX_NAT_JENTRIES(journal));
2837 }
2838
2839 /* flush dirty nats in nat entry set */
2840 list_for_each_entry_safe(set, tmp, &sets, set_list) {
2841 err = __flush_nat_entry_set(sbi, set, cpc);
2842 if (err)
2843 break;
2844 }
2845
2846 up_write(&nm_i->nat_tree_lock);
2847 /* Allow dirty nats by node block allocation in write_begin */
2848
2849 return err;
2850 }
2851
2852 static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
2853 {
2854 struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
2855 struct f2fs_nm_info *nm_i = NM_I(sbi);
2856 unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
2857 unsigned int i;
2858 __u64 cp_ver = cur_cp_version(ckpt);
2859 block_t nat_bits_addr;
2860
2861 if (!enabled_nat_bits(sbi, NULL))
2862 return 0;
2863
2864 nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
2865 nm_i->nat_bits = f2fs_kzalloc(sbi,
2866 nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
2867 if (!nm_i->nat_bits)
2868 return -ENOMEM;
2869
2870 nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
2871 nm_i->nat_bits_blocks;
2872 for (i = 0; i < nm_i->nat_bits_blocks; i++) {
2873 struct page *page;
2874
2875 page = f2fs_get_meta_page(sbi, nat_bits_addr++);
2876 if (IS_ERR(page)) {
2877 disable_nat_bits(sbi, true);
2878 return PTR_ERR(page);
2879 }
2880
2881 memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
2882 page_address(page), F2FS_BLKSIZE);
2883 f2fs_put_page(page, 1);
2884 }
2885
2886 cp_ver |= (cur_cp_crc(ckpt) << 32);
2887 if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
2888 disable_nat_bits(sbi, true);
2889 return 0;
2890 }
2891
2892 nm_i->full_nat_bits = nm_i->nat_bits + 8;
2893 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
2894
2895 f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
2896 return 0;
2897 }
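
/*
 * Layout (editorial) implied by the offsets above, with nat_bits_bytes =
 * nat_blocks / 8:
 *
 *     +0                   __le64: checkpoint version | (crc << 32)
 *     +8                   full_nat_bits  [nat_bits_bytes]
 *     +8 + nat_bits_bytes  empty_nat_bits [nat_bits_bytes]
 *
 * padded out to nat_bits_blocks = F2FS_BLK_ALIGN(2 * nat_bits_bytes + 8)
 * blocks stored at the tail of the checkpoint segment. A version
 * mismatch simply disables nat_bits rather than failing the mount.
 */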
2898
2899 static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
2900 {
2901 struct f2fs_nm_info *nm_i = NM_I(sbi);
2902 unsigned int i = 0;
2903 nid_t nid, last_nid;
2904
2905 if (!enabled_nat_bits(sbi, NULL))
2906 return;
2907
2908 for (i = 0; i < nm_i->nat_blocks; i++) {
2909 i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
2910 if (i >= nm_i->nat_blocks)
2911 break;
2912
2913 __set_bit_le(i, nm_i->nat_block_bitmap);
2914
2915 nid = i * NAT_ENTRY_PER_BLOCK;
2916 last_nid = nid + NAT_ENTRY_PER_BLOCK;
2917
2918 spin_lock(&NM_I(sbi)->nid_list_lock);
2919 for (; nid < last_nid; nid++)
2920 update_free_nid_bitmap(sbi, nid, true, true);
2921 spin_unlock(&NM_I(sbi)->nid_list_lock);
2922 }
2923
2924 for (i = 0; i < nm_i->nat_blocks; i++) {
2925 i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
2926 if (i >= nm_i->nat_blocks)
2927 break;
2928
2929 __set_bit_le(i, nm_i->nat_block_bitmap);
2930 }
2931 }
2932
2933 static int init_node_manager(struct f2fs_sb_info *sbi)
2934 {
2935 struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
2936 struct f2fs_nm_info *nm_i = NM_I(sbi);
2937 unsigned char *version_bitmap;
2938 unsigned int nat_segs;
2939 int err;
2940
2941 nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);
2942
2943 /* segment_count_nat includes the pair segment, so divide by 2. */
2944 nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
2945 nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
2946 nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;
2947
2948 /* unused nids: 0, node, meta (and root, which is counted as a valid node) */
2949 nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
2950 sbi->nquota_files - F2FS_RESERVED_NODE_NUM;
2951 nm_i->nid_cnt[FREE_NID] = 0;
2952 nm_i->nid_cnt[PREALLOC_NID] = 0;
2953 nm_i->nat_cnt = 0;
2954 nm_i->ram_thresh = DEF_RAM_THRESHOLD;
2955 nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
2956 nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;
2957
2958 INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
2959 INIT_LIST_HEAD(&nm_i->free_nid_list);
2960 INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
2961 INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
2962 INIT_LIST_HEAD(&nm_i->nat_entries);
2963 spin_lock_init(&nm_i->nat_list_lock);
2964
2965 mutex_init(&nm_i->build_lock);
2966 spin_lock_init(&nm_i->nid_list_lock);
2967 init_rwsem(&nm_i->nat_tree_lock);
2968
2969 nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
2970 nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
2971 version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
2972 if (!version_bitmap)
2973 return -EFAULT;
2974
2975 nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
2976 GFP_KERNEL);
2977 if (!nm_i->nat_bitmap)
2978 return -ENOMEM;
2979
2980 err = __get_nat_bitmaps(sbi);
2981 if (err)
2982 return err;
2983
2984 #ifdef CONFIG_F2FS_CHECK_FS
2985 nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
2986 GFP_KERNEL);
2987 if (!nm_i->nat_bitmap_mir)
2988 return -ENOMEM;
2989 #endif
2990
2991 return 0;
2992 }
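
/*
 * Sizing example (editorial) for a hypothetical image with
 * segment_count_nat = 8 and 2 MiB segments (log_blocks_per_seg = 9):
 *
 *     nat_segs   = 8 / 2       = 4      (the NAT is stored as mirrored pairs)
 *     nat_blocks = 4 << 9      = 2048
 *     max_nid    = 455 * 2048  = 931840 addressable node ids
 */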
2993
2994 static int init_free_nid_cache(struct f2fs_sb_info *sbi)
2995 {
2996 struct f2fs_nm_info *nm_i = NM_I(sbi);
2997 int i;
2998
2999 nm_i->free_nid_bitmap =
3000 f2fs_kzalloc(sbi, array_size(sizeof(unsigned char *),
3001 nm_i->nat_blocks),
3002 GFP_KERNEL);
3003 if (!nm_i->free_nid_bitmap)
3004 return -ENOMEM;
3005
3006 for (i = 0; i < nm_i->nat_blocks; i++) {
3007 nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
3008 f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
3009 if (!nm_i->free_nid_bitmap[i])
3010 return -ENOMEM;
3011 }
3012
3013 nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
3014 GFP_KERNEL);
3015 if (!nm_i->nat_block_bitmap)
3016 return -ENOMEM;
3017
3018 nm_i->free_nid_count =
3019 f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
3020 nm_i->nat_blocks),
3021 GFP_KERNEL);
3022 if (!nm_i->free_nid_count)
3023 return -ENOMEM;
3024 return 0;
3025 }
3026
3027 int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
3028 {
3029 int err;
3030
3031 sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
3032 GFP_KERNEL);
3033 if (!sbi->nm_info)
3034 return -ENOMEM;
3035
3036 err = init_node_manager(sbi);
3037 if (err)
3038 return err;
3039
3040 err = init_free_nid_cache(sbi);
3041 if (err)
3042 return err;
3043
3044 /* load free nid status from nat_bits table */
3045 load_free_nid_bitmap(sbi);
3046
3047 return f2fs_build_free_nids(sbi, true, true);
3048 }
3049
3050 void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
3051 {
3052 struct f2fs_nm_info *nm_i = NM_I(sbi);
3053 struct free_nid *i, *next_i;
3054 struct nat_entry *natvec[NATVEC_SIZE];
3055 struct nat_entry_set *setvec[SETVEC_SIZE];
3056 nid_t nid = 0;
3057 unsigned int found;
3058
3059 if (!nm_i)
3060 return;
3061
3062 /* destroy free nid list */
3063 spin_lock(&nm_i->nid_list_lock);
3064 list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
3065 __remove_free_nid(sbi, i, FREE_NID);
3066 spin_unlock(&nm_i->nid_list_lock);
3067 kmem_cache_free(free_nid_slab, i);
3068 spin_lock(&nm_i->nid_list_lock);
3069 }
3070 f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
3071 f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
3072 f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
3073 spin_unlock(&nm_i->nid_list_lock);
3074
3075 /* destroy nat cache */
3076 down_write(&nm_i->nat_tree_lock);
3077 while ((found = __gang_lookup_nat_cache(nm_i,
3078 nid, NATVEC_SIZE, natvec))) {
3079 unsigned idx;
3080
3081 nid = nat_get_nid(natvec[found - 1]) + 1;
3082 for (idx = 0; idx < found; idx++) {
3083 spin_lock(&nm_i->nat_list_lock);
3084 list_del(&natvec[idx]->list);
3085 spin_unlock(&nm_i->nat_list_lock);
3086
3087 __del_from_nat_cache(nm_i, natvec[idx]);
3088 }
3089 }
3090 f2fs_bug_on(sbi, nm_i->nat_cnt);
3091
3092 /* destroy nat set cache */
3093 nid = 0;
3094 while ((found = __gang_lookup_nat_set(nm_i,
3095 nid, SETVEC_SIZE, setvec))) {
3096 unsigned idx;
3097
3098 nid = setvec[found - 1]->set + 1;
3099 for (idx = 0; idx < found; idx++) {
3100 /* entry_cnt is not zero when a cp_error has occurred */
3101 f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
3102 radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
3103 kmem_cache_free(nat_entry_set_slab, setvec[idx]);
3104 }
3105 }
3106 up_write(&nm_i->nat_tree_lock);
3107
3108 kvfree(nm_i->nat_block_bitmap);
3109 if (nm_i->free_nid_bitmap) {
3110 int i;
3111
3112 for (i = 0; i < nm_i->nat_blocks; i++)
3113 kvfree(nm_i->free_nid_bitmap[i]);
3114 kfree(nm_i->free_nid_bitmap);
3115 }
3116 kvfree(nm_i->free_nid_count);
3117
3118 kfree(nm_i->nat_bitmap);
3119 kfree(nm_i->nat_bits);
3120 #ifdef CONFIG_F2FS_CHECK_FS
3121 kfree(nm_i->nat_bitmap_mir);
3122 #endif
3123 sbi->nm_info = NULL;
3124 kfree(nm_i);
3125 }
3126
3127 int __init f2fs_create_node_manager_caches(void)
3128 {
3129 nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
3130 sizeof(struct nat_entry));
3131 if (!nat_entry_slab)
3132 goto fail;
3133
3134 free_nid_slab = f2fs_kmem_cache_create("free_nid",
3135 sizeof(struct free_nid));
3136 if (!free_nid_slab)
3137 goto destroy_nat_entry;
3138
3139 nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
3140 sizeof(struct nat_entry_set));
3141 if (!nat_entry_set_slab)
3142 goto destroy_free_nid;
3143
3144 fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
3145 sizeof(struct fsync_node_entry));
3146 if (!fsync_node_entry_slab)
3147 goto destroy_nat_entry_set;
3148 return 0;
3149
3150 destroy_nat_entry_set:
3151 kmem_cache_destroy(nat_entry_set_slab);
3152 destroy_free_nid:
3153 kmem_cache_destroy(free_nid_slab);
3154 destroy_nat_entry:
3155 kmem_cache_destroy(nat_entry_slab);
3156 fail:
3157 return -ENOMEM;
3158 }
3159
3160 void f2fs_destroy_node_manager_caches(void)
3161 {
3162 kmem_cache_destroy(fsync_node_entry_slab);
3163 kmem_cache_destroy(nat_entry_set_slab);
3164 kmem_cache_destroy(free_nid_slab);
3165 kmem_cache_destroy(nat_entry_slab);
3166 }