// SPDX-License-Identifier: GPL-2.0
/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "iostat.h"
#include <trace/events/f2fs.h>
#define on_f2fs_build_free_nids(nm_i) mutex_is_locked(&(nm_i)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
static struct kmem_cache *fsync_node_entry_slab;
/*
 * Check whether the given nid is within the node id range.
 */
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
{
	if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
			  __func__, nid);
		return -EFSCORRUPTED;
	}
	return 0;
}
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	if (!nm_i)
		return true;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i < MAX_INO_ENTRY; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INMEM_PAGES) {
		/* it allows 20% of total_ram for inmemory pages */
		mem_size = get_pages(sbi, F2FS_INMEM_PAGES);
		res = mem_size < (val.totalram / 5);
	} else if (type == DISCARD_CACHE) {
		mem_size = (atomic_read(&dcc->discard_cmd_cnt) *
				sizeof(struct discard_cmd)) >> PAGE_SHIFT;
		res = mem_size < (avail_ram * nm_i->ram_thresh / 100);
	} else if (type == COMPRESS_PAGE) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
		unsigned long free_ram = val.freeram;

		/*
		 * If free memory is lower than the watermark or the cached
		 * page count exceeds the threshold, deny caching compressed
		 * pages.
		 */
		res = (free_ram > avail_ram * sbi->compress_watermark / 100) &&
			(COMPRESS_MAPPING(sbi)->nrpages <
				free_ram * sbi->compress_percent / 100);
#else
		res = false;
#endif
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
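/*
 * Illustrative arithmetic (editorial, not from the original source): with the
 * default ram_thresh of 10 and 1 GiB of low memory (262144 4 KiB pages), the
 * FREE_NIDS/NAT_ENTRIES branches above permit up to
 * (262144 * 10 / 100) >> 2 = 6553 pages (~25 MiB) of cached objects, while
 * the DIRTY_DENTS/INO_ENTRIES/EXTENT_CACHE branches permit twice that,
 * matching the 25%/25%/50%/50%/50% split noted in the comment above.
 */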
static void clear_node_page_dirty(struct page *page)
{
	if (PageDirty(page)) {
		f2fs_clear_page_cache_dirty_tag(page);
		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	return f2fs_get_meta_page_retry(sbi, current_nat_addr(sbi, nid));
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));

	/* get current nat block page with lock */
	src_page = get_current_nat_page(sbi, nid);
	if (IS_ERR(src_page))
		return src_page;
	dst_page = f2fs_grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
static struct nat_entry *__alloc_nat_entry(struct f2fs_sb_info *sbi,
						nid_t nid, bool no_fail)
{
	struct nat_entry *new;

	new = f2fs_kmem_cache_alloc(nat_entry_slab,
					GFP_F2FS_ZERO, no_fail, sbi);
	if (new) {
		nat_set_nid(new, nid);
		nat_reset_flag(new);
	}
	return new;
}

static void __free_nat_entry(struct nat_entry *e)
{
	kmem_cache_free(nat_entry_slab, e);
}
/* must be locked by nat_tree_lock */
static struct nat_entry *__init_nat_entry(struct f2fs_nm_info *nm_i,
	struct nat_entry *ne, struct f2fs_nat_entry *raw_ne, bool no_fail)
{
	if (no_fail)
		f2fs_radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne);
	else if (radix_tree_insert(&nm_i->nat_root, nat_get_nid(ne), ne))
		return NULL;

	if (raw_ne)
		node_info_from_raw_nat(&ne->ni, raw_ne);

	spin_lock(&nm_i->nat_list_lock);
	list_add_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	nm_i->nat_cnt[TOTAL_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
	return ne;
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	struct nat_entry *ne;

	ne = radix_tree_lookup(&nm_i->nat_root, n);

	/* move a recently accessed nat entry to the tail of the lru list */
	if (ne && !get_nat_flag(ne, IS_DIRTY)) {
		spin_lock(&nm_i->nat_list_lock);
		if (!list_empty(&ne->list))
			list_move_tail(&ne->list, &nm_i->nat_entries);
		spin_unlock(&nm_i->nat_list_lock);
	}

	return ne;
}
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt[TOTAL_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	__free_nat_entry(e);
}
static struct nat_entry_set *__grab_nat_entry_set(struct f2fs_nm_info *nm_i,
							struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab,
						GFP_NOFS, true, NULL);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}
	return head;
}
static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	struct nat_entry_set *head;
	bool new_ne = nat_get_blkaddr(ne) == NEW_ADDR;

	head = __grab_nat_entry_set(nm_i, ne);

	/*
	 * update entry_cnt under the conditions below:
	 * 1. updating NEW_ADDR to a valid block address;
	 * 2. updating an old block address to a new one;
	 */
	if (!new_ne && (get_nat_flag(ne, IS_PREALLOC) ||
				!get_nat_flag(ne, IS_DIRTY)))
		head->entry_cnt++;

	set_nat_flag(ne, IS_PREALLOC, new_ne);

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->nat_cnt[DIRTY_NAT]++;
	nm_i->nat_cnt[RECLAIMABLE_NAT]--;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	spin_lock(&nm_i->nat_list_lock);
	if (new_ne)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
	spin_unlock(&nm_i->nat_list_lock);
}
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	spin_lock(&nm_i->nat_list_lock);
	list_move_tail(&ne->list, &nm_i->nat_entries);
	spin_unlock(&nm_i->nat_list_lock);

	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->nat_cnt[DIRTY_NAT]--;
	nm_i->nat_cnt[RECLAIMABLE_NAT]++;
}
static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}

bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
{
	return NODE_MAPPING(sbi) == page->mapping &&
			IS_DNODE(page) && is_cold_node(page);
}
void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
{
	spin_lock_init(&sbi->fsync_node_lock);
	INIT_LIST_HEAD(&sbi->fsync_node_list);
	sbi->fsync_seg_id = 0;
	sbi->fsync_node_num = 0;
}

static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
							struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;
	unsigned int seq_id;

	fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab,
					GFP_NOFS, true, NULL);

	get_page(page);
	fn->page = page;
	INIT_LIST_HEAD(&fn->list);

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_add_tail(&fn->list, &sbi->fsync_node_list);
	fn->seq_id = sbi->fsync_seg_id++;
	seq_id = fn->seq_id;
	sbi->fsync_node_num++;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

	return seq_id;
}
void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
{
	struct fsync_node_entry *fn;
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	list_for_each_entry(fn, &sbi->fsync_node_list, list) {
		if (fn->page == page) {
			list_del(&fn->list);
			sbi->fsync_node_num--;
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			kmem_cache_free(fsync_node_entry_slab, fn);
			put_page(page);
			return;
		}
	}
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
	f2fs_bug_on(sbi, 1);
}
void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
{
	unsigned long flags;

	spin_lock_irqsave(&sbi->fsync_node_lock, flags);
	sbi->fsync_seg_id = 0;
	spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
}
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}
/* must be locked by nat_tree_lock */
static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *new, *e;

	new = __alloc_nat_entry(sbi, nid, false);
	if (!new)
		return;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e)
		e = __init_nat_entry(nm_i, new, ne, false);
	else
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	up_write(&nm_i->nat_tree_lock);
	if (e != new)
		__free_nat_entry(new);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	struct nat_entry *new = __alloc_nat_entry(sbi, ni->nid, true);

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = __init_nat_entry(nm_i, new, NULL, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with new
		 * information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}
	/* let's free early to reduce memory consumption */
	if (e != new)
		__free_nat_entry(new);

	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, __is_valid_data_blkaddr(nat_get_blkaddr(e)) &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);

		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (!__is_valid_data_blkaddr(new_blkaddr))
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}
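/*
 * Editorial note (not in the original file): set_node_addr() funnels every
 * NAT cache transition through one place. Moving a node to NULL_ADDR bumps
 * its version so a later reuse of the same nid can be told apart from the
 * old incarnation, and any non-valid new address clears IS_CHECKPOINTED so
 * the entry is written back by the next checkpoint.
 */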
int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	spin_lock(&nm_i->nat_list_lock);
	while (nr_shrink) {
		struct nat_entry *ne;

		if (list_empty(&nm_i->nat_entries))
			break;

		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		list_del(&ne->list);
		spin_unlock(&nm_i->nat_list_lock);

		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;

		spin_lock(&nm_i->nat_list_lock);
	}
	spin_unlock(&nm_i->nat_list_lock);

	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
						struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	block_t blkaddr;
	int i;

	ni->nid = nid;
retry:
	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return 0;
	}

	/*
	 * Check the current segment summary by trying to grab journal_rwsem
	 * first. This rwsem is on the checkpoint's critical path, which also
	 * requires the nat_tree_lock held above; so if we fail to grab it
	 * here, retry rather than stalling the checkpoint.
	 */
	if (!rwsem_is_locked(&sbi->cp_global_sem)) {
		down_read(&curseg->journal_rwsem);
	} else if (!down_read_trylock(&curseg->journal_rwsem)) {
		up_read(&nm_i->nat_tree_lock);
		goto retry;
	}

	i = f2fs_lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = f2fs_get_meta_page(sbi, index);
	if (IS_ERR(page))
		return PTR_ERR(page);

	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	blkaddr = le32_to_cpu(ne.block_addr);
	if (__is_valid_data_blkaddr(blkaddr) &&
		!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE))
		return -EFAULT;

	/* cache nat entry */
	cache_nat_entry(sbi, nid, &ne);
	return 0;
}
/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void f2fs_ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		f2fs_ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK(dn->inode);
	const long indirect_blks = ADDRS_PER_BLOCK(dn->inode) * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK(dn->inode);
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
		fallthrough;
	case 2:
		base += 2 * direct_blks;
		fallthrough;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
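/*
 * Worked example (illustrative, 4 KiB blocks: ADDRS_PER_BLOCK and
 * NIDS_PER_BLOCK are both 1018): when cur_level == max_level the while loop
 * runs zero times and skipped_unit stays ADDRS_PER_BLOCK, so a missing
 * direct node lets the caller jump 1018 data pages at once; each extra tree
 * level between cur_level and max_level multiplies that stride by
 * NIDS_PER_BLOCK.
 */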
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK(inode);
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK(inode) * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
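/*
 * Worked example (illustrative; assumes 4 KiB blocks with direct_index = 923,
 * i.e. no inline-xattr reservation, and direct_blks = 1018): block 922 is the
 * last in-inode pointer (level 0); block 923 yields level 1 with
 * offset[0] = NODE_DIR1_BLOCK and offset[1] = 0; and block
 * 923 + 2 * 1018 = 2959 yields level 2 with offset[0] = NODE_IND1_BLOCK,
 * offset[1] = 0, offset[2] = 0.
 */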
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);
	if (level < 0)
		return level;

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = f2fs_get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!f2fs_alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = f2fs_new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				f2fs_alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			f2fs_alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = f2fs_get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = f2fs_get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = f2fs_data_blkaddr(dn);

	if (is_inode_flag_set(dn->inode, FI_COMPRESSED_FILE) &&
					f2fs_sb_has_readonly(sbi)) {
		unsigned int c_len = f2fs_cluster_blocks_are_contiguous(dn);
		block_t blkaddr;

		if (!c_len)
			goto out;

		blkaddr = f2fs_data_blkaddr(dn);
		if (blkaddr == COMPRESS_ADDR)
			blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + 1);

		f2fs_update_extent_tree_range_compressed(dn->inode,
					index, blkaddr,
					F2FS_I(dn->inode)->i_cluster_size,
					c_len);
	}
out:
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
static int truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;
	int err;
	pgoff_t index;

	err = f2fs_get_node_info(sbi, dn->nid, &ni);
	if (err)
		return err;

	/* Deallocate node address */
	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, dn->nid == dn->inode->i_ino);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		f2fs_remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}

	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	index = dn->node_page->index;
	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			index, index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);

	return 0;
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;
	int err;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	f2fs_truncate_data_blocks(dn);
	err = truncate_node(dn);
	if (err)
		return err;

	return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = f2fs_get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	f2fs_ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		ret = truncate_node(dn);
		if (ret)
			goto out_err;
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
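/*
 * Editorial note (not in the original file): truncate_nodes() returns the
 * number of node blocks it freed. The value NIDS_PER_BLOCK + 1 signals that
 * an entire subtree (NIDS_PER_BLOCK children plus the indirect node itself)
 * is gone, which is exactly what the recursive call above tests before
 * zeroing the child's slot with set_nid().
 */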
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = f2fs_get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	f2fs_ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		err = truncate_node(dn);
		if (err)
			goto fail;
	} else {
		f2fs_put_page(pages[idx], 1);
	}

	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);
	if (level < 0) {
		trace_f2fs_truncate_inode_blocks_exit(inode, level);
		return level;
	}

	page = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
/* caller must lock inode page */
int f2fs_truncate_xattr_node(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;
	int err;

	if (!nid)
		return 0;

	npage = f2fs_get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	set_new_dnode(&dn, inode, NULL, npage, nid);
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_page(npage, 1);
		return err;
	}

	f2fs_i_xnid_write(inode, 0);

	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int f2fs_remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = f2fs_get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = f2fs_truncate_xattr_node(inode);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		f2fs_truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		f2fs_put_dnode(&dn);
		return -EIO;
	}

	if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
		f2fs_warn(F2FS_I_SB(inode),
			"f2fs_remove_inode_page: inconsistent i_blocks, ino:%lu, iblocks:%llu",
			inode->i_ino, (unsigned long long)inode->i_blocks);
		set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
	}

	/* will put inode & node pages */
	err = truncate_node(&dn);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	return 0;
}
struct page *f2fs_new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return f2fs_new_node_page(&dn, 0);
}
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely((err = inc_valid_node_count(sbi, dn->inode, !ofs))))
		goto fail;

#ifdef CONFIG_F2FS_CHECK_FS
	err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
	if (err) {
		dec_valid_node_count(sbi, dn->inode, !ofs);
		goto fail;
	}
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(page, S_ISDIR(dn->inode->i_mode));
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Callers should release the page according to the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};
	int err;

	if (PageUptodate(page)) {
		if (!f2fs_inode_chksum_verify(sbi, page)) {
			ClearPageUptodate(page);
			return -EFSBADCRC;
		}
		return LOCKED_PAGE;
	}

	err = f2fs_get_node_info(sbi, page->index, &ni);
	if (err)
		return err;

	/* NEW_ADDR can be seen, after cp_error drops some dirty node pages */
	if (unlikely(ni.blk_addr == NULL_ADDR || ni.blk_addr == NEW_ADDR) ||
			is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;

	err = f2fs_submit_page_bio(&fio);

	if (!err)
		f2fs_update_iostat(sbi, FS_NODE_READ_IO, F2FS_BLKSIZE);

	return err;
}
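/*
 * Editorial note (not in the original file): LOCKED_PAGE is a positive
 * pseudo-return meaning "already up to date, still locked"; callers such as
 * __get_node_page() below use it to skip the unlock/relock cycle that a
 * real read would require.
 */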
/*
 * Readahead a node page
 */
void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	if (f2fs_check_nid_range(sbi, nid))
		return;

	apage = xa_load(&NODE_MAPPING(sbi)->i_pages, nid);
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	if (f2fs_check_nid_range(sbi, nid))
		return ERR_PTR(-EINVAL);
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		f2fs_ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}

	if (!f2fs_inode_chksum_verify(sbi, page)) {
		err = -EFSBADCRC;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			  nid, nid_of_node(page), ino_of_node(page),
			  ofs_of_node(page), cpver_of_node(page),
			  next_blkaddr_of_node(page));
		err = -EINVAL;
out_err:
		ClearPageUptodate(page);
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}
struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *f2fs_get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = f2fs_pagecache_get_page(inode->i_mapping, 0,
					FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	f2fs_remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0;
	struct pagevec pvec;
	struct page *last_page = NULL;
	int nr_pages;

	pagevec_init(&pvec);

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc, bool do_balance,
				enum iostat_type io_type, unsigned int *seq_id)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.ino = ino_of_node(page),
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
		.io_type = io_type,
		.io_wbc = wbc,
	};
	unsigned int seq;

	trace_f2fs_writepage(page, NODE);

	if (unlikely(f2fs_cp_error(sbi))) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;

	if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
			wbc->sync_mode == WB_SYNC_NONE &&
			IS_DNODE(page) && is_cold_node(page))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (f2fs_get_node_info(sbi, nid, &ni))
		goto redirty_out;

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (__is_valid_data_blkaddr(ni.blk_addr) &&
		!f2fs_is_valid_blkaddr(sbi, ni.blk_addr,
					DATA_GENERIC_ENHANCE)) {
		up_read(&sbi->node_write);
		goto redirty_out;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	/* should add to global list before clearing PAGECACHE status */
	if (f2fs_in_warm_node_list(sbi, page)) {
		seq = f2fs_add_fsync_node_entry(sbi, page);
		if (seq_id)
			*seq_id = seq;
	}

	set_page_writeback(page);
	ClearPageError(page);

	fio.old_blkaddr = ni.blk_addr;
	f2fs_do_write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	if (do_balance)
		f2fs_balance_fs(sbi, false);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
int f2fs_move_node_page(struct page *node_page, int gc_type)
{
	int err = 0;

	if (gc_type == FG_GC) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		f2fs_wait_on_page_writeback(node_page, NODE, true, true);

		set_page_dirty(node_page);

		if (!clear_page_dirty_for_io(node_page)) {
			err = -EAGAIN;
			goto out_page;
		}

		if (__write_node_page(node_page, false, NULL,
					&wbc, false, FS_GC_NODE_IO, NULL)) {
			err = -EAGAIN;
			unlock_page(node_page);
		}
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
	return err;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc, false,
						FS_NODE_IO, NULL);
}
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic,
			unsigned int *seq_id)
{
	pgoff_t index = 0;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;
	int nr_pages;
	int nwritten = 0;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec);
	index = 0;

	while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true, true);

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						f2fs_update_inode(inode, page);
					set_dentry_mark(page,
						f2fs_need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc, true,
						FS_NODE_IO, seq_id);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				nwritten++;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
			   ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write_cond(sbi, NULL, NULL, ino, NODE);
	return ret ? -EIO : 0;
}
static int f2fs_match_ino(struct inode *inode, unsigned long ino, void *data)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool clean;

	if (inode->i_ino != ino)
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	spin_lock(&sbi->inode_lock[DIRTY_META]);
	clean = list_empty(&F2FS_I(inode)->gdirty_list);
	spin_unlock(&sbi->inode_lock[DIRTY_META]);

	if (clean)
		return 0;

	inode = igrab(inode);
	if (!inode)
		return 0;
	return 1;
}
static bool flush_dirty_inode(struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct inode *inode;
	nid_t ino = ino_of_node(page);

	inode = find_inode_nowait(sbi->sb, ino, f2fs_match_ino, NULL);
	if (!inode)
		return false;

	f2fs_update_inode(inode, page);
	unlock_page(page);

	iput(inode);
	return true;
}
void f2fs_flush_inline_data(struct f2fs_sb_info *sbi)
{
	pgoff_t index = 0;
	struct pagevec pvec;
	int nr_pages;

	pagevec_init(&pvec);

	while ((nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (!IS_DNODE(page))
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data, if it's async context. */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				continue;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
				struct writeback_control *wbc,
				bool do_balance, enum iostat_type io_type)
{
	pgoff_t index;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;
	int nr_pages, done = 0;

	pagevec_init(&pvec);

next_step:
	index = 0;

	while (!done && (nr_pages = pagevec_lookup_tag(&pvec,
			NODE_MAPPING(sbi), &index, PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;
			bool may_dirty = true;

			/* give a priority to WB_SYNC threads */
			if (atomic_read(&sbi->wb_sync_req[NODE]) &&
					wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (wbc->sync_mode == WB_SYNC_ALL)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data/inode, if it's async context. */
			if (!do_balance)
				goto write_node;

			/* flush inline_data */
			if (page_private_inline(page)) {
				clear_page_private_inline(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			/* flush dirty inode */
			if (IS_INODE(page) && may_dirty) {
				may_dirty = false;
				if (flush_dirty_inode(page))
					goto lock_node;
			}
write_node:
			f2fs_wait_on_page_writeback(page, NODE, true, true);

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted,
						wbc, do_balance, io_type, NULL);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		if (!is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
				wbc->sync_mode == WB_SYNC_NONE && step == 1)
			goto out;
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);

	if (unlikely(f2fs_cp_error(sbi)))
		ret = -EIO;
	return ret;
}
int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
						unsigned int seq_id)
{
	struct fsync_node_entry *fn;
	struct page *page;
	struct list_head *head = &sbi->fsync_node_list;
	unsigned long flags;
	unsigned int cur_seq_id = 0;
	int ret2, ret = 0;

	while (seq_id && cur_seq_id < seq_id) {
		spin_lock_irqsave(&sbi->fsync_node_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		fn = list_first_entry(head, struct fsync_node_entry, list);
		if (fn->seq_id > seq_id) {
			spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
			break;
		}
		cur_seq_id = fn->seq_id;
		page = fn->page;
		get_page(page);
		spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);

		f2fs_wait_on_page_writeback(page, NODE, true, false);
		if (TestClearPageError(page))
			ret = -EIO;

		put_page(page);
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;

	return ret;
}
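/*
 * Editorial note (not in the original file): seq_id values are handed out by
 * f2fs_add_fsync_node_entry() in submission order, so waiting until
 * cur_seq_id reaches the seq_id returned for an fsync covers every node page
 * that fsync submitted.
 */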
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi, true);

	/* collect a number of dirty node pages and write together */
	if (wbc->sync_mode != WB_SYNC_ALL &&
			get_pages(sbi, F2FS_DIRTY_NODES) <
					nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_inc(&sbi->wb_sync_req[NODE]);
	else if (atomic_read(&sbi->wb_sync_req[NODE]))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	blk_start_plug(&plug);
	f2fs_sync_node_pages(sbi, wbc, true, FS_NODE_IO);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);

	if (wbc->sync_mode == WB_SYNC_ALL)
		atomic_dec(&sbi->wb_sync_req[NODE]);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
#ifdef CONFIG_F2FS_CHECK_FS
	if (IS_INODE(page))
		f2fs_inode_chksum_set(F2FS_P_SB(page), page);
#endif
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		set_page_private_reference(page);
		return 1;
	}
	return 0;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
							nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}
static int __insert_free_nid(struct f2fs_sb_info *sbi,
				struct free_nid *i)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);

	if (err)
		return err;

	nm_i->nid_cnt[FREE_NID]++;
	list_add_tail(&i->list, &nm_i->free_nid_list);
	return 0;
}
static void __remove_free_nid(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_state state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, state != i->state);
	nm_i->nid_cnt[state]--;
	if (state == FREE_NID)
		list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
static void __move_free_nid(struct f2fs_sb_info *sbi, struct free_nid *i,
			enum nid_state org_state, enum nid_state dst_state)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, org_state != i->state);
	i->state = dst_state;
	nm_i->nid_cnt[org_state]--;
	nm_i->nid_cnt[dst_state]++;

	switch (dst_state) {
	case PREALLOC_NID:
		list_del(&i->list);
		break;
	case FREE_NID:
		list_add_tail(&i->list, &nm_i->free_nid_list);
		break;
	default:
		BUG_ON(1);
	}
}
bool f2fs_nat_bitmap_enabled(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i;
	bool ret = true;

	down_read(&nm_i->nat_tree_lock);
	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap)) {
			ret = false;
			break;
		}
	}
	up_read(&nm_i->nat_tree_lock);

	return ret;
}
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set) {
		if (test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		nm_i->free_nid_count[nat_ofs]++;
	} else {
		if (!test_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]))
			return;
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
		if (!build)
			nm_i->free_nid_count[nat_ofs]--;
	}
}
/* return whether the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi,
				nid_t nid, bool build, bool update)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	if (unlikely(f2fs_check_nid_range(sbi, nid)))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS, true, NULL);
	i->nid = nid;
	i->state = FREE_NID;

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 *   Thread A             Thread B
		 *  - f2fs_create
		 *   - f2fs_new_inode
		 *    - f2fs_alloc_nid
		 *     - __insert_nid_to_list(PREALLOC_NID)
		 *                     - f2fs_balance_fs_bg
		 *                      - f2fs_build_free_nids
		 *                       - __f2fs_build_free_nids
		 *                        - scan_nat_page
		 *                         - add_free_nid
		 *                          - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - f2fs_init_inode_metadata
		 *    - f2fs_new_inode_page
		 *     - f2fs_new_node_page
		 *      - set_node_addr
		 *  - f2fs_alloc_nid_done
		 *   - __remove_nid_from_list(PREALLOC_NID)
		 *                         - __insert_nid_to_list(FREE_NID)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == FREE_NID)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_free_nid(sbi, i);
err_out:
	if (update) {
		update_free_nid_bitmap(sbi, nid, ret, build);
		if (!build)
			nm_i->available_nids++;
	}
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();

	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}
static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == FREE_NID) {
		__remove_free_nid(sbi, i, FREE_NID);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
static int scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);

		if (blk_addr == NEW_ADDR)
			return -EINVAL;

		if (blk_addr == NULL_ADDR) {
			add_free_nid(sbi, start_nid, true, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, start_nid, false, true);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	return 0;
}
static void scan_curseg_cache(struct f2fs_sb_info *sbi)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true, false);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
}
static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i, idx;
	nid_t nid;

	down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			idx = find_next_bit_le(nm_i->free_nid_bitmap[i],
						NAT_ENTRY_PER_BLOCK, idx);
			if (idx >= NAT_ENTRY_PER_BLOCK)
				break;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true, false);

			if (nm_i->nid_cnt[FREE_NID] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);
}
static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
						bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i = 0, ret;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	if (unlikely(nid % NAT_ENTRY_PER_BLOCK))
		nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
		return 0;

	if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
		return 0;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
			return 0;
	}

	/* readahead nat pages to be scanned */
	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		if (!test_bit_le(NAT_BLOCK_OFFSET(nid),
						nm_i->nat_block_bitmap)) {
			struct page *page = get_current_nat_page(sbi, nid);

			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
			} else {
				ret = scan_nat_page(sbi, page, nid);
				f2fs_put_page(page, 1);
			}

			if (ret) {
				up_read(&nm_i->nat_tree_lock);
				f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
				return ret;
			}
		}

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	scan_curseg_cache(sbi);

	up_read(&nm_i->nat_tree_lock);

	f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);

	return 0;
}
int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	int ret;

	mutex_lock(&NM_I(sbi)->build_lock);
	ret = __f2fs_build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);

	return ret;
}
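/*
 * Typical call pattern (illustrative only, mirroring f2fs_alloc_nid() below):
 *
 *	nid_t nid;
 *
 *	if (!f2fs_alloc_nid(sbi, &nid))	// may call f2fs_build_free_nids()
 *		return -ENOSPC;
 *	...
 *	f2fs_alloc_nid_done(sbi, nid);	// or f2fs_alloc_nid_failed(sbi, nid)
 */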
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when the inode
 * is created.
 */
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
		f2fs_show_injection_info(sbi, FAULT_ALLOC_NID);
		return false;
	}

	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by f2fs_build_free_nids */
	if (nm_i->nid_cnt[FREE_NID] && !on_f2fs_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->free_nid_list));
		i = list_first_entry(&nm_i->free_nid_list,
					struct free_nid, list);
		*nid = i->nid;

		__move_free_nid(sbi, i, FREE_NID, PREALLOC_NID);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	if (!f2fs_build_free_nids(sbi, true, false))
		goto retry;
	return false;
}
/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_free_nid(sbi, i, PREALLOC_NID);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}
/*
 * f2fs_alloc_nid() should be called prior to this function.
 */
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!f2fs_available_free_memory(sbi, FREE_NIDS)) {
		__remove_free_nid(sbi, i, PREALLOC_NID);
		need_free = true;
	} else {
		__move_free_nid(sbi, i, PREALLOC_NID, FREE_NID);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	while (nr_shrink && nm_i->nid_cnt[FREE_NID] > MAX_FREE_NIDS) {
		struct free_nid *i, *next;
		unsigned int batch = SHRINK_NID_BATCH_SIZE;

		spin_lock(&nm_i->nid_list_lock);
		list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
			if (!nr_shrink || !batch ||
				nm_i->nid_cnt[FREE_NID] <= MAX_FREE_NIDS)
				break;
			__remove_free_nid(sbi, i, FREE_NID);
			kmem_cache_free(free_nid_slab, i);
			nr_shrink--;
			batch--;
		}
		spin_unlock(&nm_i->nid_list_lock);
	}

	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}
int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	ri = F2FS_INODE(page);
	if (ri->i_inline & F2FS_INLINE_XATTR) {
		if (!f2fs_has_inline_xattr(inode)) {
			set_inode_flag(inode, FI_INLINE_XATTR);
			stat_inc_inline_xattr(inode);
		}
	} else {
		if (f2fs_has_inline_xattr(inode)) {
			stat_dec_inline_xattr(inode);
			clear_inode_flag(inode, FI_INLINE_XATTR);
		}
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(inode, ipage);
	src_addr = inline_xattr_addr(inode, page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	f2fs_update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
	return 0;
}
int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid;
	struct dnode_of_data dn;
	struct node_info ni;
	struct page *xpage;
	int err;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	err = f2fs_get_node_info(sbi, prev_xnid, &ni);
	if (err)
		return err;

	f2fs_invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode, false);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	if (!f2fs_alloc_nid(sbi, &new_xnid))
		return -ENOSPC;

	set_new_dnode(&dn, inode, NULL, NULL, new_xnid);
	xpage = f2fs_new_node_page(&dn, XATTR_NODE_OFFSET);
	if (IS_ERR(xpage)) {
		f2fs_alloc_nid_failed(sbi, new_xnid);
		return PTR_ERR(xpage);
	}

	f2fs_alloc_nid_done(sbi, new_xnid);
	f2fs_update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), VALID_XATTR_BLOCK_SIZE);

	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	return 0;
}
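/*
 * Rebuilds an inode page purely from the node page found in the fsync log:
 * only the fields up to i_ext are copied, while link and block counts are
 * reset to the minimal values a freshly created inode would have.
 */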
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;
	int err;

	err = f2fs_get_node_info(sbi, ino, &old_ni);
	if (err)
		return err;

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);
	set_cold_node(ipage, false);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, offsetof(struct f2fs_inode, i_ext));
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & (F2FS_INLINE_XATTR | F2FS_EXTRA_ATTR);
	if (dst->i_inline & F2FS_EXTRA_ATTR) {
		dst->i_extra_isize = src->i_extra_isize;

		if (f2fs_sb_has_flexible_inline_xattr(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
							i_inline_xattr_size))
			dst->i_inline_xattr_size = src->i_inline_xattr_size;

		if (f2fs_sb_has_project_quota(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_projid))
			dst->i_projid = src->i_projid;

		if (f2fs_sb_has_inode_crtime(sbi) &&
			F2FS_FITS_IN_INODE(src, le16_to_cpu(src->i_extra_isize),
								i_crtime)) {
			dst->i_crtime = src->i_crtime;
			dst->i_crtime_nsec = src->i_crtime_nsec;
		}
	}

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(inc_valid_node_count(sbi, NULL, true)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);

	return 0;
}
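/*
 * Reconstructs the node summary block of @segno by reading back every node
 * block in the segment and recording its nid; version and ofs_in_node are
 * meaningless for node summaries, so they are zeroed.
 */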
int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = bio_max_segs(last_offset - i);

		/* readahead node pages */
		f2fs_ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = f2fs_get_tmp_page(sbi, idx);

			if (IS_ERR(page))
				return PTR_ERR(page);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}
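/*
 * Drains all NAT entries sitting in the curseg journal into the in-memory
 * NAT cache and marks them dirty, so the next flush writes them out through
 * the regular nat_entry_set path instead.
 */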
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		if (f2fs_check_nid_range(sbi, nid))
			continue;

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = __alloc_nat_entry(sbi, nid, true);
			__init_nat_entry(nm_i, ne, &raw_ne, true);
		}

		/*
		 * if a free nat in journal has not been used after last
		 * checkpoint, we should remove it from available nids,
		 * since later we will add it again.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}
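/*
 * Each NAT block is tracked by two mutually exclusive bitmaps:
 * empty_nat_bits (no valid entries) and full_nat_bits (all
 * NAT_ENTRY_PER_BLOCK entries valid); partially used blocks are clear in
 * both.
 */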
static void __update_nat_bits(struct f2fs_nm_info *nm_i, unsigned int nat_ofs,
							unsigned int valid)
{
	if (valid == 0) {
		__set_bit_le(nat_ofs, nm_i->empty_nat_bits);
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_ofs, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_ofs, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_ofs, nm_i->full_nat_bits);
}
static void update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
						struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i = 0;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	if (nat_index == 0) {
		valid = 1;
		i = 1;
	}
	for (; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
			valid++;
	}

	__update_nat_bits(nm_i, nat_index, valid);
}
void f2fs_enable_nat_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs;

	down_read(&nm_i->nat_tree_lock);

	for (nat_ofs = 0; nat_ofs < nm_i->nat_blocks; nat_ofs++) {
		unsigned int valid = 0, nid_ofs = 0;

		/* handle nid zero, since it should never be used */
		if (unlikely(nat_ofs == 0)) {
			valid = 1;
			nid_ofs = 1;
		}

		for (; nid_ofs < NAT_ENTRY_PER_BLOCK; nid_ofs++) {
			if (!test_bit_le(nid_ofs,
					nm_i->free_nid_bitmap[nat_ofs]))
				valid++;
		}

		__update_nat_bits(nm_i, nat_ofs, valid);
	}

	up_read(&nm_i->nat_tree_lock);
}
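/*
 * Note that update_nat_bits() is only reachable on the nat-page path below;
 * forcing that path for CP_UMOUNT is presumably what keeps the nat_bits
 * written at umount consistent with the on-disk NAT blocks.
 */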
static int __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if ((cpc->reason & CP_UMOUNT) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		if (IS_ERR(page))
			return PTR_ERR(page);

		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = f2fs_lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);
		nat_reset_flag(ne);
		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false, true);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
	return 0;
}
/*
 * This function is called during the checkpointing process.
 */
int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);
	int err = 0;

	/*
	 * during unmount, let's flush nat_bits before checking
	 * nat_cnt[DIRTY_NAT].
	 */
	if (cpc->reason & CP_UMOUNT) {
		down_write(&nm_i->nat_tree_lock);
		remove_nats_in_journal(sbi);
		up_write(&nm_i->nat_tree_lock);
	}

	if (!nm_i->nat_cnt[DIRTY_NAT])
		return 0;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store dirty nat
	 * entries, remove all entries from journal and merge them
	 * into nat entry set.
	 */
	if (cpc->reason & CP_UMOUNT ||
		!__has_cursum_space(journal,
			nm_i->nat_cnt[DIRTY_NAT], NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;

		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list) {
		err = __flush_nat_entry_set(sbi, set, cpc);
		if (err)
			break;
	}

	up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */

	return err;
}
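/*
 * On-disk nat_bits layout as read back below: an 8-byte checkpoint version
 * stamp, then full_nat_bits, then empty_nat_bits (one bit per NAT block
 * each), padded up to nat_bits_blocks whole blocks at the tail of the
 * checkpoint area.
 */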
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	nm_i->nat_bits_blocks = F2FS_BLK_ALIGN((nat_bits_bytes << 1) + 8);
	nm_i->nat_bits = f2fs_kvzalloc(sbi,
			nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS, GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return 0;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page;

		page = f2fs_get_meta_page(sbi, nat_bits_addr++);
		if (IS_ERR(page))
			return PTR_ERR(page);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
		f2fs_notice(sbi, "Disable nat_bits due to incorrect cp_ver (%llu, %llu)",
			cp_ver, le64_to_cpu(*(__le64 *)nm_i->nat_bits));
		return 0;
	}

	f2fs_notice(sbi, "Found nat_bits in checkpoint");
	return 0;
}
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = nid + NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* unused nids: 0, node, meta (and root is counted as a valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
						F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID] = 0;
	nm_i->nid_cnt[PREALLOC_NID] = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	spin_lock_init(&nm_i->nat_list_lock);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}
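/*
 * Sizing note: free_nid_bitmap below is allocated as one small bitmap per
 * NAT block rather than one flat array, presumably to keep each allocation
 * small on very large volumes.
 */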
static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int i;

	nm_i->free_nid_bitmap =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned char *),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
			f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
		if (!nm_i->free_nid_bitmap[i])
			return -ENOMEM;
	}

	nm_i->nat_block_bitmap = f2fs_kvzalloc(sbi, nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count =
		f2fs_kvzalloc(sbi, array_size(sizeof(unsigned short),
					      nm_i->nat_blocks),
			      GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}
int f2fs_build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = f2fs_kzalloc(sbi, sizeof(struct f2fs_nm_info),
							GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	return f2fs_build_free_nids(sbi, true, true);
}
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		__remove_free_nid(sbi, i, FREE_NID);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[PREALLOC_NID]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->free_nid_list));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++) {
			spin_lock(&nm_i->nat_list_lock);
			list_del(&natvec[idx]->list);
			spin_unlock(&nm_i->nat_list_lock);

			__del_from_nat_cache(nm_i, natvec[idx]);
		}
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt[TOTAL_NAT]);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero when cp_error has occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	if (nm_i->free_nid_bitmap) {
		int i;

		for (i = 0; i < nm_i->nat_blocks; i++)
			kvfree(nm_i->free_nid_bitmap[i]);
		kvfree(nm_i->free_nid_bitmap);
	}
	kvfree(nm_i->free_nid_count);

	kvfree(nm_i->nat_bitmap);
	kvfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kvfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init f2fs_create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("f2fs_nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("f2fs_free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("f2fs_nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;

	fsync_node_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_node_entry",
			sizeof(struct fsync_node_entry));
	if (!fsync_node_entry_slab)
		goto destroy_nat_entry_set;
	return 0;

destroy_nat_entry_set:
	kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}
void f2fs_destroy_node_manager_caches(void)
{
	kmem_cache_destroy(fsync_node_entry_slab);
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}