/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
bool available_free_memory(struct f2fs_sb_info *sbi, int type)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct sysinfo val;
	unsigned long avail_ram;
	unsigned long mem_size = 0;
	bool res = false;

	si_meminfo(&val);

	/* only uses low memory */
	avail_ram = val.totalram - val.totalhigh;

	/*
	 * give 25%, 25%, 50%, 50%, 50% memory for each component respectively
	 */
	if (type == FREE_NIDS) {
		mem_size = (nm_i->nid_cnt[FREE_NID_LIST] *
				sizeof(struct free_nid)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
	} else if (type == NAT_ENTRIES) {
		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
							PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
		if (excess_cached_nats(sbi))
			res = false;
	} else if (type == DIRTY_DENTS) {
		if (sbi->sb->s_bdi->wb.dirty_exceeded)
			return false;
		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == INO_ENTRIES) {
		int i;

		for (i = 0; i <= UPDATE_INO; i++)
			mem_size += sbi->im[i].ino_num *
						sizeof(struct ino_entry);
		mem_size >>= PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else if (type == EXTENT_CACHE) {
		mem_size = (atomic_read(&sbi->total_ext_tree) *
				sizeof(struct extent_tree) +
				atomic_read(&sbi->total_ext_node) *
				sizeof(struct extent_node)) >> PAGE_SHIFT;
		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
	} else {
		if (!sbi->sb->s_bdi->wb.dirty_exceeded)
			return true;
	}
	return res;
}
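
/*
 * Illustrative numbers (an editorial sketch, not taken from the code
 * above): with 4 GiB of low memory, 4 KiB pages and the default
 * ram_thresh of 10%, the overall budget is about 1048576 * 10 / 100 =
 * ~104857 pages.  FREE_NIDS and NAT_ENTRIES are then each capped at a
 * quarter of that (">> 2", ~26214 pages, roughly 100 MiB), while
 * DIRTY_DENTS, INO_ENTRIES and EXTENT_CACHE get half (">> 1").
 */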
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(F2FS_M_SB(mapping), F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);
	dst_page = grab_meta_page(sbi, dst_off);
	f2fs_bug_on(sbi, PageDirty(src_page));

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
						struct nat_entry *ne)
{
	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
	struct nat_entry_set *head;

	head = radix_tree_lookup(&nm_i->nat_set_root, set);
	if (!head) {
		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);

		INIT_LIST_HEAD(&head->entry_list);
		INIT_LIST_HEAD(&head->set_list);
		head->set = set;
		head->entry_cnt = 0;
		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
	}

	if (get_nat_flag(ne, IS_DIRTY))
		goto refresh_list;

	nm_i->dirty_nat_cnt++;
	head->entry_cnt++;
	set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
	if (nat_get_blkaddr(ne) == NEW_ADDR)
		list_del_init(&ne->list);
	else
		list_move_tail(&ne->list, &head->entry_list);
}

static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
		struct nat_entry_set *set, struct nat_entry *ne)
{
	list_move_tail(&ne->list, &nm_i->nat_entries);
	set_nat_flag(ne, IS_DIRTY, false);
	set->entry_cnt--;
	nm_i->dirty_nat_cnt--;
}

static unsigned int __gang_lookup_nat_set(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry_set **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_set_root, (void **)ep,
							start, nr);
}
int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need = false;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		if (!get_nat_flag(e, IS_CHECKPOINTED) &&
				!get_nat_flag(e, HAS_FSYNCED_INODE))
			need = true;
	}
	up_read(&nm_i->nat_tree_lock);
	return need;
}

bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool is_cp = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
		is_cp = false;
	up_read(&nm_i->nat_tree_lock);
	return is_cp;
}

bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool need_update = true;

	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ino);
	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
			(get_nat_flag(e, IS_CHECKPOINTED) ||
			 get_nat_flag(e, HAS_FSYNCED_INODE)))
		need_update = false;
	up_read(&nm_i->nat_tree_lock);
	return need_update;
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
								bool no_fail)
{
	struct nat_entry *new;

	if (no_fail) {
		new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
		f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
	} else {
		new = kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
		if (!new)
			return NULL;
		if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
			kmem_cache_free(nat_entry_slab, new);
			return NULL;
		}
	}

	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	nat_reset_flag(new);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid, false);
		if (e)
			node_info_from_raw_nat(&e->ni, ne);
	} else {
		f2fs_bug_on(sbi, nat_get_ino(e) != le32_to_cpu(ne->ino) ||
				nat_get_blkaddr(e) !=
					le32_to_cpu(ne->block_addr) ||
				nat_get_version(e) != ne->version);
	}
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;

	down_write(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid, true);
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when nid is reallocated,
		 * previous nat entry can be remained in nat cache.
		 * So, reinitialize it with new information.
		 */
		copy_node_info(&e->ni, ni);
		f2fs_bug_on(sbi, ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(sbi, nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));

		/* in order to reuse the nid */
		if (nm_i->next_scan_nid > ni->nid)
			nm_i->next_scan_nid = ni->nid;
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	if (new_blkaddr == NEW_ADDR || new_blkaddr == NULL_ADDR)
		set_nat_flag(e, IS_CHECKPOINTED, false);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	if (ni->nid != ni->ino)
		e = __lookup_nat_cache(nm_i, ni->ino);
	if (e) {
		if (fsync_done && ni->nid == ni->ino)
			set_nat_flag(e, HAS_FSYNCED_INODE, true);
		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
	}
	up_write(&nm_i->nat_tree_lock);
}
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	int nr = nr_shrink;

	if (!down_write_trylock(&nm_i->nat_tree_lock))
		return 0;

	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	up_write(&nm_i->nat_tree_lock);
	return nr - nr_shrink;
}
/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	pgoff_t index;
	int i;

	ni->nid = nid;

	/* Check nat cache */
	down_read(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
		up_read(&nm_i->nat_tree_lock);
		return;
	}

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));

	/* Check current segment summary */
	down_read(&curseg->journal_rwsem);
	i = lookup_journal_in_cursum(journal, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(journal, i);
		node_info_from_raw_nat(ni, &ne);
	}
	up_read(&curseg->journal_rwsem);
	if (i >= 0) {
		up_read(&nm_i->nat_tree_lock);
		goto cache;
	}

	/* Fill node_info from nat page */
	index = current_nat_addr(sbi, nid);
	up_read(&nm_i->nat_tree_lock);

	page = get_meta_page(sbi, index);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	down_write(&nm_i->nat_tree_lock);
	cache_nat_entry(sbi, nid, &ne);
	up_write(&nm_i->nat_tree_lock);
}
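
/*
 * Editorial note on the lookup order above: a nid is resolved from the
 * in-memory nat cache first, then from the NAT journal kept in the
 * hot-data summary block, and only then from the on-disk NAT page,
 * whose result is cached for later lookups.  A rough sketch of the
 * fallback chain (names as used in this file):
 *
 *	__lookup_nat_cache() -> lookup_journal_in_cursum() -> get_meta_page()
 */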
/*
 * readahead MAX_RA_NODE number of node pages.
 */
static void ra_node_pages(struct page *parent, int start, int n)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	struct blk_plug plug;
	int i, end;
	nid_t nid;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + n;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start; i < end; i++) {
		nid = get_nid(parent, i, false);
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);
}

pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs)
{
	const long direct_index = ADDRS_PER_INODE(dn->inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	unsigned int skipped_unit = ADDRS_PER_BLOCK;
	int cur_level = dn->cur_level;
	int max_level = dn->max_level;
	pgoff_t base = 0;

	if (!dn->max_level)
		return pgofs + 1;

	while (max_level-- > cur_level)
		skipped_unit *= NIDS_PER_BLOCK;

	switch (dn->max_level) {
	case 3:
		base += 2 * indirect_blks;
	case 2:
		base += 2 * direct_blks;
	case 1:
		base += direct_index;
		break;
	default:
		f2fs_bug_on(F2FS_I_SB(dn->inode), 1);
	}

	return ((pgofs - base) / skipped_unit + 1) * skipped_unit + base;
}
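
/*
 * Illustrative arithmetic (assuming the default on-disk geometry,
 * direct_index = 923 and direct_blks = 1018): for dn->max_level = 2 the
 * switch falls through and accumulates base = 2 * 1018 + 923 = 2959,
 * the first file offset served by an indirect node; pgofs is then
 * rounded up to the next skipped_unit-aligned offset past base.
 */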
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct inode *inode, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(inode);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		return -E2BIG;
	}
got:
	return level;
}
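
/*
 * Worked example (assuming the default geometry with inline xattrs
 * reserved: direct_index = 923, direct_blks = dptrs_per_blk = 1018):
 * for block = 2000 the walk strips the 923 in-inode slots, then the
 * 1018 slots under NODE_DIR1_BLOCK, and lands in NODE_DIR2_BLOCK with
 * 2000 - 923 - 1018 = 59 left over.  It returns level 1 with
 * offset[] = { NODE_DIR2_BLOCK, 59 } and noffset[1] = 2 (the second
 * node block counted from the inode).
 */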
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if ro is not set RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct page *npage[4];
	struct page *parent = NULL;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i = 0;
	int err = 0;

	level = get_node_path(dn->inode, index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}

	/* if inline_data is set, should not report any block indices */
	if (f2fs_has_inline_data(dn->inode) && index) {
		err = -ENOENT;
		f2fs_put_page(npage[0], 1);
		goto release_out;
	}

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	if (err == -ENOENT) {
		dn->cur_level = i;
		dn->max_level = level;
		dn->ofs_in_node = offset[level];
	}
	return err;
}
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(sbi, ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
		f2fs_inode_synced(dn->inode);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	set_sbi_flag(sbi, SBI_IS_DIRTY);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(F2FS_I_SB(dn->inode), dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	ra_node_pages(page, ofs, NIDS_PER_BLOCK);

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			if (set_nid(page, i, 0, false))
				dn->node_changed = true;
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				if (set_nid(page, i, 0, false))
					dn->node_changed = true;
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count'll be increased */
		pages[i] = get_node_page(F2FS_I_SB(dn->inode), nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK);

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		if (set_nid(pages[idx], i, 0, false))
			dn->node_changed = true;
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(inode, from, offset, noffset);

	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			BUG_ON(page->mapping != NODE_MAPPING(sbi));
			f2fs_wait_on_page_writeback(page, NODE, true);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	f2fs_i_xnid_write(inode, 0);

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}

/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	err = truncate_xattr_node(inode, dn.inode_page);
	if (err) {
		f2fs_put_dnode(&dn);
		return err;
	}

	/* remove potential inline_data blocks */
	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode))
		truncate_data_blocks_range(&dn, 1);

	/* 0 is possible, after f2fs_new_inode() has failed */
	f2fs_bug_on(F2FS_I_SB(inode),
			inode->i_blocks != 0 && inode->i_blocks != 8);

	/* will put inode & node pages */
	truncate_node(&dn);
	return 0;
}
struct page *new_inode_page(struct inode *inode)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}

struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct node_info new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}
#ifdef CONFIG_F2FS_CHECK_FS
	get_node_info(sbi, dn->nid, &new_ni);
	f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
	new_ni.nid = dn->nid;
	new_ni.ino = dn->inode->i_ino;
	new_ni.blk_addr = NULL_ADDR;
	new_ni.flag = 0;
	new_ni.version = 0;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	f2fs_wait_on_page_writeback(page, NODE, true);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (set_page_dirty(page))
		dn->node_changed = true;

	if (f2fs_has_xattr_block(ofs))
		f2fs_i_xnid_write(dn->inode, dn->nid);

	if (ofs == 0)
		inc_valid_inode_count(sbi);
	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Caller should do after getting the following values.
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE or error: f2fs_put_page(page, 1)
 */
static int read_node_page(struct page *page, int op_flags)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_READ,
		.op_flags = op_flags,
		.page = page,
		.encrypted_page = NULL,
	};

	if (PageUptodate(page))
		return LOCKED_PAGE;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		return -ENOENT;
	}

	fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
	return f2fs_submit_page_bio(&fio);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	if (!nid)
		return;
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));

	rcu_read_lock();
	apage = radix_tree_lookup(&NODE_MAPPING(sbi)->page_tree, nid);
	rcu_read_unlock();
	if (apage)
		return;

	apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!apage)
		return;

	err = read_node_page(apage, REQ_RAHEAD);
	f2fs_put_page(apage, err ? 1 : 0);
}
static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid,
					struct page *parent, int start)
{
	struct page *page;
	int err;

	if (!nid)
		return ERR_PTR(-ENOENT);
	f2fs_bug_on(sbi, check_nid_range(sbi, nid));
repeat:
	page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, 0);
	if (err < 0) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	} else if (err == LOCKED_PAGE) {
		err = 0;
		goto page_hit;
	}

	if (parent)
		ra_node_pages(parent, start + 1, MAX_RA_NODE);

	lock_page(page);

	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	if (unlikely(!PageUptodate(page))) {
		err = -EIO;
		goto out_err;
	}
page_hit:
	if (unlikely(nid != nid_of_node(page))) {
		f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, "
			"nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
			nid, nid_of_node(page), ino_of_node(page),
			ofs_of_node(page), cpver_of_node(page),
			next_blkaddr_of_node(page));
		ClearPageUptodate(page);
		err = -EINVAL;
out_err:
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	return __get_node_page(sbi, nid, NULL, 0);
}

struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(parent);
	nid_t nid = get_nid(parent, start, false);

	return __get_node_page(sbi, nid, parent, start);
}
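
/*
 * Typical usage (an editorial sketch, not a call site from this file):
 * while walking a path resolved by get_dnode_of_data(), a caller reads
 * one child and gets its siblings prefetched, e.g.
 *
 *	struct page *np = get_node_page_ra(parent, i);
 *	if (IS_ERR(np))
 *		return PTR_ERR(np);
 *	...use np...
 *	f2fs_put_page(np, 1);
 */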
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct page *page;
	int ret;

	/* should flush inline_data before evict_inode */
	inode = ilookup(sbi->sb, ino);
	if (!inode)
		return;

	page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0);
	if (!page)
		goto iput_out;

	if (!PageUptodate(page))
		goto page_out;

	if (!PageDirty(page))
		goto page_out;

	if (!clear_page_dirty_for_io(page))
		goto page_out;

	ret = f2fs_write_inline_data(inode, page);
	inode_dec_dirty_pages(inode);
	remove_dirty_inode(inode);
	if (ret)
		set_page_dirty(page);
page_out:
	f2fs_put_page(page, 1);
iput_out:
	iput(inode);
}

void move_node_page(struct page *node_page, int gc_type)
{
	if (gc_type == FG_GC) {
		struct f2fs_sb_info *sbi = F2FS_P_SB(node_page);
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_ALL,
			.nr_to_write = 1,
			.for_reclaim = 0,
		};

		set_page_dirty(node_page);
		f2fs_wait_on_page_writeback(node_page, NODE, true);

		f2fs_bug_on(sbi, PageWriteback(node_page));
		if (!clear_page_dirty_for_io(node_page))
			goto out_page;

		if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc))
			unlock_page(node_page);
		goto release_page;
	} else {
		/* set page dirty and write it */
		if (!PageWriteback(node_page))
			set_page_dirty(node_page);
	}
out_page:
	unlock_page(node_page);
release_page:
	f2fs_put_page(node_page, 0);
}
static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index, end;
	struct pagevec pvec;
	struct page *last_page = NULL;

	pagevec_init(&pvec, 0);
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				return ERR_PTR(-EIO);
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (last_page)
				f2fs_put_page(last_page, 0);

			get_page(page);
			last_page = page;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return last_page;
}
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);
	nid_t nid;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = NODE,
		.op = REQ_OP_WRITE,
		.op_flags = wbc_to_write_flags(wbc),
		.page = page,
		.encrypted_page = NULL,
		.submitted = false,
	};

	trace_f2fs_writepage(page, NODE);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(sbi, page->index != nid);

	if (wbc->for_reclaim) {
		if (!down_read_trylock(&sbi->node_write))
			goto redirty_out;
	} else {
		down_read(&sbi->node_write);
	}

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		ClearPageUptodate(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		up_read(&sbi->node_write);
		unlock_page(page);
		return 0;
	}

	if (atomic && !test_opt(sbi, NOBARRIER))
		fio.op_flags |= REQ_PREFLUSH | REQ_FUA;

	set_page_writeback(page);
	fio.old_blkaddr = ni.blk_addr;
	write_node_page(nid, &fio);
	set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	up_read(&sbi->node_write);

	if (wbc->for_reclaim) {
		f2fs_submit_merged_write_cond(sbi, page->mapping->host, 0,
						page->index, NODE);
		submitted = NULL;
	}

	unlock_page(page);

	if (unlikely(f2fs_cp_error(sbi))) {
		f2fs_submit_merged_write(sbi, NODE);
		submitted = NULL;
	}
	if (submitted)
		*submitted = fio.submitted;

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	return __write_node_page(page, false, NULL, wbc);
}
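
/*
 * Rough call graph for node writeback (an editorial sketch, not
 * exhaustive): the VFS enters through f2fs_write_node_page() and
 * f2fs_write_node_pages(), fsync takes the fsync_node_pages() path
 * below, and all of them funnel into __write_node_page(), which picks
 * the new on-disk location via write_node_page() and re-points the NAT
 * entry with set_node_addr().
 */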
int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
			struct writeback_control *wbc, bool atomic)
{
	pgoff_t index, end;
	pgoff_t last_idx = ULONG_MAX;
	struct pagevec pvec;
	int ret = 0;
	struct page *last_page = NULL;
	bool marked = false;
	nid_t ino = inode->i_ino;

	if (atomic) {
		last_page = last_fsync_dnode(sbi, ino);
		if (IS_ERR_OR_NULL(last_page))
			return PTR_ERR_OR_ZERO(last_page);
	}
retry:
	pagevec_init(&pvec, 0);
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				f2fs_put_page(last_page, 0);
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			if (!IS_DNODE(page) || !is_cold_node(page))
				continue;
			if (ino_of_node(page) != ino)
				continue;

			lock_page(page);

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page) && page != last_page) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);
			BUG_ON(PageWriteback(page));

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			if (!atomic || page == last_page) {
				set_fsync_mark(page, 1);
				if (IS_INODE(page)) {
					if (is_inode_flag_set(inode,
								FI_DIRTY_INODE))
						update_inode(inode, page);
					set_dentry_mark(page,
						need_dentry_mark(sbi, ino));
				}
				/* may be written by other thread */
				if (!PageDirty(page))
					set_page_dirty(page);
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = __write_node_page(page, atomic &&
						page == last_page,
						&submitted, wbc);
			if (ret) {
				unlock_page(page);
				f2fs_put_page(last_page, 0);
				break;
			} else if (submitted) {
				last_idx = page->index;
			}

			if (page == last_page) {
				f2fs_put_page(page, 0);
				marked = true;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();

		if (ret || marked)
			break;
	}
	if (!ret && atomic && !marked) {
		f2fs_msg(sbi->sb, KERN_DEBUG,
			"Retry to write fsync mark: ino=%u, idx=%lx",
					ino, last_page->index);
		lock_page(last_page);
		f2fs_wait_on_page_writeback(last_page, NODE, true);
		set_page_dirty(last_page);
		unlock_page(last_page);
		goto retry;
	}
out:
	if (last_idx != ULONG_MAX)
		f2fs_submit_merged_write_cond(sbi, NULL, ino, last_idx, NODE);
	return ret ? -EIO : 0;
}
int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = 0;
	int nwritten = 0;
	int ret = 0;

next_step:
	pagevec_init(&pvec, 0);
	index = 0;
	end = ULONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];
			bool submitted = false;

			if (unlikely(f2fs_cp_error(sbi))) {
				pagevec_release(&pvec);
				ret = -EIO;
				goto out;
			}

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;
lock_node:
			if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* flush inline_data */
			if (is_inline_node(page)) {
				clear_inline_node(page);
				unlock_page(page);
				flush_inline_data(sbi, ino_of_node(page));
				goto lock_node;
			}

			f2fs_wait_on_page_writeback(page, NODE, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			set_fsync_mark(page, 0);
			set_dentry_mark(page, 0);

			ret = __write_node_page(page, false, &submitted, wbc);
			if (ret)
				unlock_page(page);
			else if (submitted)
				nwritten++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}
out:
	if (nwritten)
		f2fs_submit_merged_write(sbi, NODE);
	return ret;
}
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = ULONG_MAX;
	struct pagevec pvec;
	int ret2, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE, true);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	ret2 = filemap_check_errors(NODE_MAPPING(sbi));
	if (!ret)
		ret = ret2;
	return ret;
}
static int f2fs_write_node_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
	struct blk_plug plug;
	long diff;

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	trace_f2fs_writepages(mapping->host, wbc, NODE);

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	blk_start_plug(&plug);
	sync_node_pages(sbi, wbc);
	blk_finish_plug(&plug);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	trace_f2fs_writepages(mapping->host, wbc, NODE);
	return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
{
	trace_f2fs_set_page_dirty(page, NODE);

	if (!PageUptodate(page))
		SetPageUptodate(page);
	if (!PageDirty(page)) {
		f2fs_set_page_dirty_nobuffers(page);
		inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		f2fs_trace_pid(page);
		return 1;
	}
	return 0;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static int __insert_nid_to_list(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_list list, bool new)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (new) {
		int err = radix_tree_insert(&nm_i->free_nid_root, i->nid, i);
		if (err)
			return err;
	}

	f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
						i->state != NID_ALLOC);
	nm_i->nid_cnt[list]++;
	list_add_tail(&i->list, &nm_i->nid_list[list]);
	return 0;
}

static void __remove_nid_from_list(struct f2fs_sb_info *sbi,
			struct free_nid *i, enum nid_list list, bool reuse)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	f2fs_bug_on(sbi, list == FREE_NID_LIST ? i->state != NID_NEW :
						i->state != NID_ALLOC);
	nm_i->nid_cnt[list]--;
	list_del(&i->list);
	if (!reuse)
		radix_tree_delete(&nm_i->free_nid_root, i->nid);
}
/* return if the nid is recognized as free */
static bool add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *e;
	struct nat_entry *ne;
	int err = -EINVAL;
	bool ret = false;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return false;

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	if (radix_tree_preload(GFP_NOFS))
		goto err;

	spin_lock(&nm_i->nid_list_lock);

	if (build) {
		/*
		 * Thread A                  Thread B
		 * - f2fs_create
		 *  - f2fs_new_inode
		 *   - alloc_nid
		 *    - __insert_nid_to_list(ALLOC_NID_LIST)
		 *                           - f2fs_balance_fs_bg
		 *                            - build_free_nids
		 *                             - __build_free_nids
		 *                              - scan_nat_page
		 *                               - add_free_nid
		 *                                - __lookup_nat_cache
		 *  - f2fs_add_link
		 *   - init_inode_metadata
		 *    - new_inode_page
		 *     - new_node_page
		 *      - set_node_addr
		 *  - alloc_nid_done
		 *   - __remove_nid_from_list(ALLOC_NID_LIST)
		 *                           - __insert_nid_to_list(FREE_NID_LIST)
		 */
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) ||
				nat_get_blkaddr(ne) != NULL_ADDR))
			goto err_out;

		e = __lookup_free_nid_list(nm_i, nid);
		if (e) {
			if (e->state == NID_NEW)
				ret = true;
			goto err_out;
		}
	}
	ret = true;
	err = __insert_nid_to_list(sbi, i, FREE_NID_LIST, true);
err_out:
	spin_unlock(&nm_i->nid_list_lock);
	radix_tree_preload_end();
err:
	if (err)
		kmem_cache_free(free_nid_slab, i);
	return ret;
}

static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
		need_free = true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid,
							bool set, bool build)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid);
	unsigned int nid_ofs = nid - START_NID(nid);

	if (!test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	if (set)
		__set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
	else
		__clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);

	if (set)
		nm_i->free_nid_count[nat_ofs]++;
	else if (!build)
		nm_i->free_nid_count[nat_ofs]--;
}

static void scan_nat_page(struct f2fs_sb_info *sbi,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid);
	int i;

	if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap))
		return;

	__set_bit_le(nat_ofs, nm_i->nat_block_bitmap);

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
		bool freed = false;

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR)
			freed = add_free_nid(sbi, start_nid, true);
		spin_lock(&NM_I(sbi)->nid_list_lock);
		update_free_nid_bitmap(sbi, start_nid, freed, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}
}
static void scan_free_nid_bits(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	unsigned int i, idx;

	down_read(&nm_i->nat_tree_lock);

	for (i = 0; i < nm_i->nat_blocks; i++) {
		if (!test_bit_le(i, nm_i->nat_block_bitmap))
			continue;
		if (!nm_i->free_nid_count[i])
			continue;
		for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) {
			nid_t nid;

			if (!test_bit_le(idx, nm_i->free_nid_bitmap[i]))
				continue;

			nid = i * NAT_ENTRY_PER_BLOCK + idx;
			add_free_nid(sbi, nid, true);

			if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS)
				goto out;
		}
	}
out:
	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;
		nid_t nid;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
	up_read(&nm_i->nat_tree_lock);
}
static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	if (unlikely(nid >= nm_i->max_nid))
		nid = 0;

	/* Enough entries */
	if (nm_i->nid_cnt[FREE_NID_LIST] >= NAT_ENTRY_PER_BLOCK)
		return;

	if (!sync && !available_free_memory(sbi, FREE_NIDS))
		return;

	if (!mount) {
		/* try to find free nids in free_nid_bitmap */
		scan_free_nid_bits(sbi);

		if (nm_i->nid_cnt[FREE_NID_LIST])
			return;
	}

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
							META_NAT, true);

	down_read(&nm_i->nat_tree_lock);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(sbi, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (++i >= FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	down_read(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		block_t addr;

		addr = le32_to_cpu(nat_in_journal(journal, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(journal, i));
		if (addr == NULL_ADDR)
			add_free_nid(sbi, nid, true);
		else
			remove_free_nid(sbi, nid);
	}
	up_read(&curseg->journal_rwsem);
	up_read(&nm_i->nat_tree_lock);

	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
					nm_i->ra_nid_pages, META_NAT, false);
}

void build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
	mutex_lock(&NM_I(sbi)->build_lock);
	__build_free_nids(sbi, sync, mount);
	mutex_unlock(&NM_I(sbi)->build_lock);
}
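
/*
 * Summary of free-nid discovery (editorial note derived from the code
 * above): candidates come from two sources, the in-memory
 * free_nid_bitmap consulted first by scan_free_nid_bits(), and a
 * bounded on-disk scan of FREE_NID_PAGES NAT pages starting at
 * next_scan_nid; the NAT journal then overrides both for entries that
 * have not yet been written back.
 */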
/*
 * If this function returns success, caller can obtain a new nid
 * from second parameter of this function.
 * The returned nid could be used ino as well as nid when inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
retry:
#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
		f2fs_show_injection_info(FAULT_ALLOC_NID);
		return false;
	}
#endif
	spin_lock(&nm_i->nid_list_lock);

	if (unlikely(nm_i->available_nids == 0)) {
		spin_unlock(&nm_i->nid_list_lock);
		return false;
	}

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->nid_cnt[FREE_NID_LIST] && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(sbi, list_empty(&nm_i->nid_list[FREE_NID_LIST]));
		i = list_first_entry(&nm_i->nid_list[FREE_NID_LIST],
					struct free_nid, list);
		*nid = i->nid;

		__remove_nid_from_list(sbi, i, FREE_NID_LIST, true);
		i->state = NID_ALLOC;
		__insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false);
		nm_i->available_nids--;

		update_free_nid_bitmap(sbi, *nid, false, false);

		spin_unlock(&nm_i->nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	build_free_nids(sbi, true, false);
	goto retry;
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);
	__remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
	spin_unlock(&nm_i->nid_list_lock);

	kmem_cache_free(free_nid_slab, i);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;
	bool need_free = false;

	if (!nid)
		return;

	spin_lock(&nm_i->nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(sbi, !i);

	if (!available_free_memory(sbi, FREE_NIDS)) {
		__remove_nid_from_list(sbi, i, ALLOC_NID_LIST, false);
		need_free = true;
	} else {
		__remove_nid_from_list(sbi, i, ALLOC_NID_LIST, true);
		i->state = NID_NEW;
		__insert_nid_to_list(sbi, i, FREE_NID_LIST, false);
	}

	nm_i->available_nids++;

	update_free_nid_bitmap(sbi, nid, true, false);

	spin_unlock(&nm_i->nid_list_lock);

	if (need_free)
		kmem_cache_free(free_nid_slab, i);
}
int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next;
	int nr = nr_shrink;

	if (nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
		return 0;

	if (!mutex_trylock(&nm_i->build_lock))
		return 0;

	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next, &nm_i->nid_list[FREE_NID_LIST],
									list) {
		if (nr_shrink <= 0 ||
				nm_i->nid_cnt[FREE_NID_LIST] <= MAX_FREE_NIDS)
			break;

		__remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
		kmem_cache_free(free_nid_slab, i);
		nr_shrink--;
	}
	spin_unlock(&nm_i->nid_list_lock);
	mutex_unlock(&nm_i->build_lock);

	return nr - nr_shrink;
}
void recover_inline_xattr(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
		clear_inode_flag(inode, FI_INLINE_XATTR);
		goto update_inode;
	}

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	f2fs_wait_on_page_writeback(ipage, NODE, true);
	memcpy(dst_addr, src_addr, inline_size);
update_inode:
	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}

int recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;
	struct page *xpage;

	if (!prev_xnid)
		goto recover_xnid;

	/* 1: invalidate the previous xattr nid */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(sbi, ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: update xattr nid in inode */
	remove_free_nid(sbi, new_xnid);
	f2fs_i_xnid_write(inode, new_xnid);
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(sbi, 1);
	update_inode_page(inode);

	/* 3: update and set xattr node page dirty */
	xpage = grab_cache_page(NODE_MAPPING(sbi), new_xnid);
	if (!xpage)
		return -ENOMEM;

	memcpy(F2FS_NODE(xpage), F2FS_NODE(page), PAGE_SIZE);

	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	set_page_dirty(xpage);
	f2fs_put_page(xpage, 1);

	return 0;
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	get_node_info(sbi, ino, &old_ni);

	if (unlikely(old_ni.blk_addr != NULL_ADDR))
		return -EINVAL;
retry:
	ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
	if (!ipage) {
		congestion_wait(BLK_RW_ASYNC, HZ/50);
		goto retry;
	}

	/* Should not use this inode from free nid list */
	remove_free_nid(sbi, ino);

	if (!PageUptodate(ipage))
		SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;
	dst->i_inline = src->i_inline & F2FS_INLINE_XATTR;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	set_page_dirty(ipage);
	f2fs_put_page(ipage, 1);

	return 0;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	block_t addr;
	int i, idx, last_offset, nrpages;

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, BIO_MAX_PAGES);

		/* readahead node pages */
		ra_meta_pages(sbi, addr, nrpages, META_POR, true);

		for (idx = addr; idx < addr + nrpages; idx++) {
			struct page *page = get_tmp_page(sbi, idx);

			rn = F2FS_NODE(page);
			sum_entry->nid = rn->footer.nid;
			sum_entry->version = 0;
			sum_entry->ofs_in_node = 0;
			sum_entry++;
			f2fs_put_page(page, 1);
		}

		invalidate_mapping_pages(META_MAPPING(sbi), addr,
							addr + nrpages);
	}
	return 0;
}
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	int i;

	down_write(&curseg->journal_rwsem);
	for (i = 0; i < nats_in_cursum(journal); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(journal, i));

		raw_ne = nat_in_journal(journal, i);

		ne = __lookup_nat_cache(nm_i, nid);
		if (!ne) {
			ne = grab_nat_entry(nm_i, nid, true);
			node_info_from_raw_nat(&ne->ni, &raw_ne);
		}

		/*
		 * if a free nat in journal has not been used after last
		 * checkpoint, we should remove it from available nids,
		 * since later we will add it again.
		 */
		if (!get_nat_flag(ne, IS_DIRTY) &&
				le32_to_cpu(raw_ne.block_addr) == NULL_ADDR) {
			spin_lock(&nm_i->nid_list_lock);
			nm_i->available_nids--;
			spin_unlock(&nm_i->nid_list_lock);
		}

		__set_nat_cache_dirty(nm_i, ne);
	}
	update_nats_in_cursum(journal, -i);
	up_write(&curseg->journal_rwsem);
}
static void __adjust_nat_entry_set(struct nat_entry_set *nes,
					struct list_head *head, int max)
{
	struct nat_entry_set *cur;

	if (nes->entry_cnt >= max)
		goto add_out;

	list_for_each_entry(cur, head, set_list) {
		if (cur->entry_cnt >= nes->entry_cnt) {
			list_add(&nes->set_list, cur->set_list.prev);
			return;
		}
	}
add_out:
	list_add_tail(&nes->set_list, head);
}

static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
						struct page *page)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_index = start_nid / NAT_ENTRY_PER_BLOCK;
	struct f2fs_nat_block *nat_blk = page_address(page);
	int valid = 0;
	int i;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	for (i = 0; i < NAT_ENTRY_PER_BLOCK; i++) {
		if (start_nid == 0 && i == 0)
			valid++;
		if (nat_blk->entries[i].block_addr)
			valid++;
	}
	if (valid == 0) {
		__set_bit_le(nat_index, nm_i->empty_nat_bits);
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
		return;
	}

	__clear_bit_le(nat_index, nm_i->empty_nat_bits);
	if (valid == NAT_ENTRY_PER_BLOCK)
		__set_bit_le(nat_index, nm_i->full_nat_bits);
	else
		__clear_bit_le(nat_index, nm_i->full_nat_bits);
}
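
/*
 * In other words (an informal restatement of the logic above):
 * empty_nat_bits marks NAT blocks where every nid is free,
 * full_nat_bits marks blocks with no free nid at all, and a block with
 * only some valid entries has both bits clear, so mount-time scanning
 * can skip the two unambiguous cases entirely.
 */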
static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
		struct nat_entry_set *set, struct cp_control *cpc)
{
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	nid_t start_nid = set->set * NAT_ENTRY_PER_BLOCK;
	bool to_journal = true;
	struct f2fs_nat_block *nat_blk;
	struct nat_entry *ne, *cur;
	struct page *page = NULL;

	/*
	 * there are two steps to flush nat entries:
	 * #1, flush nat entries to journal in current hot data summary block.
	 * #2, flush nat entries to nat page.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, set->entry_cnt, NAT_JOURNAL))
		to_journal = false;

	if (to_journal) {
		down_write(&curseg->journal_rwsem);
	} else {
		page = get_next_nat_page(sbi, start_nid);
		nat_blk = page_address(page);
		f2fs_bug_on(sbi, !nat_blk);
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(ne, cur, &set->entry_list, list) {
		struct f2fs_nat_entry *raw_ne;
		nid_t nid = nat_get_nid(ne);
		int offset;

		f2fs_bug_on(sbi, nat_get_blkaddr(ne) == NEW_ADDR);

		if (to_journal) {
			offset = lookup_journal_in_cursum(journal,
							NAT_JOURNAL, nid, 1);
			f2fs_bug_on(sbi, offset < 0);
			raw_ne = &nat_in_journal(journal, offset);
			nid_in_journal(journal, offset) = cpu_to_le32(nid);
		} else {
			raw_ne = &nat_blk->entries[nid - start_nid];
		}
		raw_nat_from_node_info(raw_ne, &ne->ni);

		__clear_nat_cache_dirty(NM_I(sbi), set, ne);
		if (nat_get_blkaddr(ne) == NULL_ADDR) {
			add_free_nid(sbi, nid, false);
			spin_lock(&NM_I(sbi)->nid_list_lock);
			NM_I(sbi)->available_nids++;
			update_free_nid_bitmap(sbi, nid, true, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		} else {
			spin_lock(&NM_I(sbi)->nid_list_lock);
			update_free_nid_bitmap(sbi, nid, false, false);
			spin_unlock(&NM_I(sbi)->nid_list_lock);
		}
	}

	if (to_journal) {
		up_write(&curseg->journal_rwsem);
	} else {
		__update_nat_bits(sbi, start_nid, page);
		f2fs_put_page(page, 1);
	}

	/* Allow dirty nats by node block allocation in write_begin */
	if (!set->entry_cnt) {
		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
		kmem_cache_free(nat_entry_set_slab, set);
	}
}
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_journal *journal = curseg->journal;
	struct nat_entry_set *setvec[SETVEC_SIZE];
	struct nat_entry_set *set, *tmp;
	unsigned int found;
	nid_t set_idx = 0;
	LIST_HEAD(sets);

	if (!nm_i->dirty_nat_cnt)
		return;

	down_write(&nm_i->nat_tree_lock);

	/*
	 * if there is not enough space in the journal to store all dirty
	 * nat entries, remove all entries from the journal and merge them
	 * into the nat entry set.
	 */
	if (enabled_nat_bits(sbi, cpc) ||
		!__has_cursum_space(journal, nm_i->dirty_nat_cnt, NAT_JOURNAL))
		remove_nats_in_journal(sbi);

	while ((found = __gang_lookup_nat_set(nm_i,
					set_idx, SETVEC_SIZE, setvec))) {
		unsigned idx;
		set_idx = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++)
			__adjust_nat_entry_set(setvec[idx], &sets,
						MAX_NAT_JENTRIES(journal));
	}

	/* flush dirty nats in nat entry set */
	list_for_each_entry_safe(set, tmp, &sets, set_list)
		__flush_nat_entry_set(sbi, set, cpc);

	up_write(&nm_i->nat_tree_lock);
	/* Allow dirty nats by node block allocation in write_begin */
}
static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int nat_bits_bytes = nm_i->nat_blocks / BITS_PER_BYTE;
	unsigned int i;
	__u64 cp_ver = cur_cp_version(ckpt);
	block_t nat_bits_addr;

	if (!enabled_nat_bits(sbi, NULL))
		return 0;

	nm_i->nat_bits_blocks = F2FS_BYTES_TO_BLK((nat_bits_bytes << 1) + 8 +
						F2FS_BLKSIZE - 1);
	nm_i->nat_bits = kzalloc(nm_i->nat_bits_blocks << F2FS_BLKSIZE_BITS,
						GFP_KERNEL);
	if (!nm_i->nat_bits)
		return -ENOMEM;

	nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
						nm_i->nat_bits_blocks;
	for (i = 0; i < nm_i->nat_bits_blocks; i++) {
		struct page *page = get_meta_page(sbi, nat_bits_addr++);

		memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
					page_address(page), F2FS_BLKSIZE);
		f2fs_put_page(page, 1);
	}

	cp_ver |= (cur_cp_crc(ckpt) << 32);
	if (cpu_to_le64(cp_ver) != *(__le64 *)nm_i->nat_bits) {
		disable_nat_bits(sbi, true);
		return 0;
	}

	nm_i->full_nat_bits = nm_i->nat_bits + 8;
	nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;

	f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint");
	return 0;
}
static inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned int i = 0;
	nid_t nid, last_nid;

	if (!enabled_nat_bits(sbi, NULL))
		return;

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);

		nid = i * NAT_ENTRY_PER_BLOCK;
		last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK;

		spin_lock(&NM_I(sbi)->nid_list_lock);
		for (; nid < last_nid; nid++)
			update_free_nid_bitmap(sbi, nid, true, true);
		spin_unlock(&NM_I(sbi)->nid_list_lock);
	}

	for (i = 0; i < nm_i->nat_blocks; i++) {
		i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i);
		if (i >= nm_i->nat_blocks)
			break;

		__set_bit_le(i, nm_i->nat_block_bitmap);
	}
}
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs;
	int err;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide to 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nm_i->nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nm_i->nat_blocks;

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->available_nids = nm_i->max_nid - sbi->total_valid_node_count -
							F2FS_RESERVED_NODE_NUM;
	nm_i->nid_cnt[FREE_NID_LIST] = 0;
	nm_i->nid_cnt[ALLOC_NID_LIST] = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;
	nm_i->ra_nid_pages = DEF_RA_NID_PAGES;
	nm_i->dirty_nats_ratio = DEF_DIRTY_NAT_RATIO_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nid_list[FREE_NID_LIST]);
	INIT_LIST_HEAD(&nm_i->nid_list[ALLOC_NID_LIST]);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
	INIT_LIST_HEAD(&nm_i->nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->nid_list_lock);
	init_rwsem(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;

	err = __get_nat_bitmaps(sbi);
	if (err)
		return err;

#ifdef CONFIG_F2FS_CHECK_FS
	nm_i->nat_bitmap_mir = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap_mir)
		return -ENOMEM;
#endif

	return 0;
}

static int init_free_nid_cache(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	nm_i->free_nid_bitmap = f2fs_kvzalloc(nm_i->nat_blocks *
					NAT_ENTRY_BITMAP_SIZE, GFP_KERNEL);
	if (!nm_i->free_nid_bitmap)
		return -ENOMEM;

	nm_i->nat_block_bitmap = f2fs_kvzalloc(nm_i->nat_blocks / 8,
								GFP_KERNEL);
	if (!nm_i->nat_block_bitmap)
		return -ENOMEM;

	nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks *
					sizeof(unsigned short), GFP_KERNEL);
	if (!nm_i->free_nid_count)
		return -ENOMEM;
	return 0;
}
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	err = init_free_nid_cache(sbi);
	if (err)
		return err;

	/* load free nid status from nat_bits table */
	load_free_nid_bitmap(sbi);

	build_free_nids(sbi, true, true);
	return 0;
}
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	struct nat_entry_set *setvec[SETVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->nid_list[FREE_NID_LIST],
									list) {
		__remove_nid_from_list(sbi, i, FREE_NID_LIST, false);
		spin_unlock(&nm_i->nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		spin_lock(&nm_i->nid_list_lock);
	}
	f2fs_bug_on(sbi, nm_i->nid_cnt[FREE_NID_LIST]);
	f2fs_bug_on(sbi, nm_i->nid_cnt[ALLOC_NID_LIST]);
	f2fs_bug_on(sbi, !list_empty(&nm_i->nid_list[ALLOC_NID_LIST]));
	spin_unlock(&nm_i->nid_list_lock);

	/* destroy nat cache */
	down_write(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned idx;

		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(sbi, nm_i->nat_cnt);

	/* destroy nat set cache */
	nid = 0;
	while ((found = __gang_lookup_nat_set(nm_i,
					nid, SETVEC_SIZE, setvec))) {
		unsigned idx;

		nid = setvec[found - 1]->set + 1;
		for (idx = 0; idx < found; idx++) {
			/* entry_cnt is not zero, when cp_error was occurred */
			f2fs_bug_on(sbi, !list_empty(&setvec[idx]->entry_list));
			radix_tree_delete(&nm_i->nat_set_root, setvec[idx]->set);
			kmem_cache_free(nat_entry_set_slab, setvec[idx]);
		}
	}
	up_write(&nm_i->nat_tree_lock);

	kvfree(nm_i->nat_block_bitmap);
	kvfree(nm_i->free_nid_bitmap);
	kvfree(nm_i->free_nid_count);

	kfree(nm_i->nat_bitmap);
	kfree(nm_i->nat_bits);
#ifdef CONFIG_F2FS_CHECK_FS
	kfree(nm_i->nat_bitmap_mir);
#endif
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		goto fail;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab)
		goto destroy_nat_entry;

	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
			sizeof(struct nat_entry_set));
	if (!nat_entry_set_slab)
		goto destroy_free_nid;
	return 0;

destroy_free_nid:
	kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
	kmem_cache_destroy(nat_entry_slab);
fail:
	return -ENOMEM;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(nat_entry_set_slab);
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}