/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
#define on_build_free_nids(nmi) mutex_is_locked(&(nmi)->build_lock)
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static inline bool available_free_memory(struct f2fs_nm_info *nm_i, int type)
{
	struct sysinfo val;
	unsigned long mem_size = 0;

	si_meminfo(&val);
	if (type == FREE_NIDS)
		mem_size = nm_i->fcnt * sizeof(struct free_nid);
	else if (type == NAT_ENTRIES)
		mem_size += nm_i->nat_cnt * sizeof(struct nat_entry);
	mem_size >>= 12;	/* bytes to 4KB pages, matching val.totalram */

	/* give 50:50 memory for free nids and nat caches respectively */
	return (mem_size < ((val.totalram * nm_i->ram_thresh) >> 11));
}
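
/*
 * Worked example (illustrative): with the default DEF_RAM_THRESHOLD of 10
 * and 4KB pages, (val.totalram * 10) >> 11 is roughly totalram / 205, so
 * each cache may hold about 0.5% of all RAM pages. Since both caches are
 * checked against the same bound, together they are capped near 1% of
 * system memory, i.e. about 10MB per 1GB of RAM.
 */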
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}
int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}
bool fsync_mark_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	bool fsync_done = false;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e)
		fsync_done = e->fsync_done;
	read_unlock(&nm_i->nat_tree_lock);
	return fsync_done;
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	new->checkpointed = true;
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}
static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
	}
	write_unlock(&nm_i->nat_tree_lock);
}
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr, bool fsync_done)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the previous nat entry can
		 * remain in the nat cache. So, reinitialize it with the
		 * new information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);

	/* update fsync_mark if its inode nat entry is still alive */
	e = __lookup_nat_cache(nm_i, ni->ino);
	if (e)
		e->fsync_done = fsync_done;
	write_unlock(&nm_i->nat_tree_lock);
}
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (available_free_memory(nm_i, NAT_ENTRIES))
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}
/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
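
/*
 * Note the lookup order above: the in-memory nat cache first, then the
 * NAT journal kept in the hot data summary block, and only then the
 * on-disk NAT block, whose result is inserted back into the cache.
 */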
/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
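
/*
 * Worked example (illustrative; the constants assume the default 4KB
 * block geometry, where ADDRS_PER_INODE(fi) is 923 without inline data
 * or xattrs, and ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018): block 500
 * lives in the inode itself, so level 0 and offset[0] = 500; block 1000
 * is 77 blocks past the inode, so it resolves through NODE_DIR1_BLOCK
 * with level 1, offset[0] = NODE_DIR1_BLOCK and offset[1] = 77.
 */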
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op() only if mode is set with ALLOC_NODE.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
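
/*
 * Minimal usage sketch for get_dnode_of_data() (illustrative only, not
 * part of the original file; the helper name is made up): the caller
 * builds a dnode_of_data, looks the block up, consumes dn.data_blkaddr,
 * and releases the pages via f2fs_put_dnode().
 */
static inline int __f2fs_lookup_data_blkaddr(struct inode *inode,
					pgoff_t index, block_t *blkaddr)
{
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return err;

	*blkaddr = dn.data_blkaddr;
	f2fs_put_dnode(&dn);
	return 0;
}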
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);

	invalidate_mapping_pages(NODE_MAPPING(sbi),
			dn->node_page->index, dn->node_page->index);

	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < idx + 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			err = PTR_ERR(pages[i]);
			idx = i - 1;
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[idx + 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[idx + 1] = 0;
	idx--;
fail:
	for (i = idx; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_inode *ri;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	ri = F2FS_INODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, ri, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			f2fs_wait_on_page_writeback(page, NODE);
			ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}
/*
 * Caller should grab and release a rwsem by calling f2fs_lock_op() and
 * f2fs_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() fails */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}
struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}
struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
					dn->nid, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);

	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (f2fs_has_xattr_block(ofs))
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *apage;
	int err;

	apage = find_get_page(NODE_MAPPING(sbi), nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct page *page;
	int err;
repeat:
	page = grab_cache_page_write_begin(NODE_MAPPING(sbi),
					nid, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	f2fs_bug_on(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}
/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(NODE_MAPPING(sbi), nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}
void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			NODE_MAPPING(sbi)->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
				PAGECACHE_TAG_WRITEBACK,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				f2fs_wait_on_page_writeback(page, NODE);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &NODE_MAPPING(sbi)->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &NODE_MAPPING(sbi)->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;
	struct f2fs_io_info fio = {
		.type = NODE,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	f2fs_wait_on_page_writeback(page, NODE);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	wbc->pages_skipped++;
	account_page_redirty(page);
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}
static int f2fs_write_node_pages(struct address_space *mapping,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long diff;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < nr_pages_to_skip(sbi, NODE))
		goto skip_write;

	diff = nr_pages_to_write(sbi, NODE, wbc);
	wbc->sync_mode = WB_SYNC_NONE;
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return 0;

skip_write:
	wbc->pages_skipped += get_pages(sbi, F2FS_DIRTY_NODES);
	return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}
static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};
static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i,
						nid_t n)
{
	return radix_tree_lookup(&nm_i->free_nid_root, n);
}

static void __del_from_free_nid_list(struct f2fs_nm_info *nm_i,
						struct free_nid *i)
{
	list_del(&i->list);
	radix_tree_delete(&nm_i->free_nid_root, i->nid);
	kmem_cache_free(free_nid_slab, i);
}
static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
{
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (!available_free_memory(nm_i, FREE_NIDS))
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne &&
			(!ne->checkpointed || nat_get_blkaddr(ne) != NULL_ADDR))
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
static void scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(nm_i, start_nid, true) < 0)
				break;
		}
	}
}
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}
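
/*
 * In short, one build pass scans up to FREE_NID_PAGES NAT pages starting
 * at next_scan_nid, then reconciles the result against the NAT journal,
 * so nids freed or reused only in the journal are not missed.
 */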
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode
 * is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !on_build_free_nids(nm_i)) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each(this, &nm_i->free_nid_list) {
			i = list_entry(this, struct free_nid, list);
			if (i->state == NID_NEW)
				break;
		}

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	build_free_nids(sbi);
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(nm_i, i);
	spin_unlock(&nm_i->free_nid_list_lock);
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nm_i, nid);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (!available_free_memory(nm_i, FREE_NIDS)) {
		__del_from_free_nid_list(nm_i, i);
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
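
/*
 * Illustrative sketch of the nid allocation protocol (not part of the
 * original file; the helper name is made up): a nid handed out by
 * alloc_nid() stays in NID_ALLOC state until the caller either commits
 * it with alloc_nid_done() or returns it with alloc_nid_failed(), as
 * get_dnode_of_data() does above.
 */
static inline int __f2fs_alloc_one_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	if (!alloc_nid(sbi, nid))
		return -ENOSPC;

	/*
	 * ... create the node page for *nid here; on error call
	 * alloc_nid_failed(sbi, *nid) instead and bail out ...
	 */

	alloc_nid_done(sbi, *nid);
	return 0;
}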
void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr, false);
	clear_node_page_dirty(page);
}
void recover_inline_xattr(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	void *src_addr, *dst_addr;
	size_t inline_size;
	struct page *ipage;
	struct f2fs_inode *ri;

	if (!f2fs_has_inline_xattr(inode))
		return;

	if (!IS_INODE(page))
		return;

	ri = F2FS_INODE(page);
	if (!(ri->i_inline & F2FS_INLINE_XATTR))
		return;

	ipage = get_node_page(sbi, inode->i_ino);
	f2fs_bug_on(IS_ERR(ipage));

	dst_addr = inline_xattr_addr(ipage);
	src_addr = inline_xattr_addr(page);
	inline_size = inline_xattr_size(inode);

	memcpy(dst_addr, src_addr, inline_size);

	update_inode(inode, ipage);
	f2fs_put_page(ipage, 1);
}
bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid;
	nid_t new_xnid = nid_of_node(page);
	struct node_info ni;

	recover_inline_xattr(inode, page);

	if (!f2fs_has_xattr_block(ofs_of_node(page)))
		return false;

	/* 1: invalidate the previous xattr nid */
	if (!prev_xnid)
		goto recover_xnid;

	/* Deallocate node address */
	get_node_info(sbi, prev_xnid, &ni);
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, inode);
	set_node_addr(sbi, &ni, NULL_ADDR, false);

recover_xnid:
	/* 2: allocate new xattr nid */
	if (unlikely(!inc_valid_node_count(sbi, inode)))
		f2fs_bug_on(1);

	remove_free_nid(NM_I(sbi), new_xnid);
	get_node_info(sbi, new_xnid, &ni);
	ni.ino = inode->i_ino;
	set_node_addr(sbi, &ni, NEW_ADDR, false);
	F2FS_I(inode)->i_xattr_nid = new_xnid;

	/* 3: update xattr blkaddr */
	refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
	set_node_addr(sbi, &ni, blkaddr, false);

	update_inode_page(inode);
	return true;
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(NODE_MAPPING(sbi), ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_INODE(page);
	dst = F2FS_INODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i_ext - (unsigned long)src);
	dst->i_size = 0;
	dst->i_blocks = cpu_to_le64(1);
	dst->i_links = cpu_to_le32(1);
	dst->i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR, false);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}
/*
 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 * These pre-read pages are linked in the pages list.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
				int start, int nrpages)
{
	struct page *page;
	int page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC | REQ_META | REQ_PRIO
	};

	for (; page_idx < start + nrpages; page_idx++) {
		/* alloc temporary page for reading node summary info */
		page = alloc_page(GFP_F2FS_ZERO);
		if (!page)
			break;

		lock_page(page);
		page->index = page_idx;
		list_add_tail(&page->lru, pages);
	}

	list_for_each_entry(page, pages, lru)
		f2fs_submit_page_mbio(sbi, page, page->index, &fio);

	f2fs_submit_merged_bio(sbi, META, READ);

	return page_idx - start;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page, *tmp;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	int i, last_offset, nrpages, err = 0;
	LIST_HEAD(page_list);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* read ahead node pages */
		nrpages = ra_sum_pages(sbi, &page_list, addr, nrpages);
		if (!nrpages)
			return -ENOMEM;

		list_for_each_entry_safe(page, tmp, &page_list, lru) {
			if (err == 0) {
				lock_page(page);
				if (unlikely(!PageUptodate(page))) {
					err = -EIO;
				} else {
					rn = F2FS_NODE(page);
					sum_entry->nid = rn->footer.nid;
					sum_entry->version = 0;
					sum_entry->ofs_in_node = 0;
					sum_entry++;
				}
				unlock_page(page);
			}
			list_del(&page->lru);
			__free_pages(page, 0);
		}
	}
	return err;
}
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}
/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sumpage */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page)
				f2fs_put_page(page, 1);
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get nat block with dirty flag, increased reference
			 * count, mapped and lock
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		f2fs_bug_on(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(NM_I(sbi), nid, false) <= 0) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);
}
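
/*
 * To summarize the flush above: each dirty nat entry is written either
 * into a free NAT journal slot in the hot data summary block (no extra
 * meta I/O at checkpoint time) or, once the journal has been flushed
 * back, into its NAT block proper via get_next_nat_page(), which
 * redirects the write to the other block of the NAT pair so the old
 * copy stays valid until the checkpoint commits.
 */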
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes both segments of a pair, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);

	/* not used nids: 0, node, meta, (and root counted as valid node) */
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks - 3;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;
	nm_i->ram_thresh = DEF_RAM_THRESHOLD;

	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(nm_i, i);
		nm_i->fcnt--;
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
					nid, NATVEC_SIZE, natvec))) {
		unsigned int idx;
		nid = nat_get_nid(natvec[found - 1]) + 1;
		for (idx = 0; idx < found; idx++)
			__del_from_nat_cache(nm_i, natvec[idx]);
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry));
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid));
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}