/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;

static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}

static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}

static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
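
/*
 * NAT blocks live in a pair of segments, and next_nat_addr() selects the
 * other copy of the pair via the NAT version bitmap.  A rough sketch of one
 * update cycle, as performed above:
 *
 *	src_page = get_meta_page(sbi, current_nat_addr(sbi, nid));
 *	dst_page = grab_meta_page(sbi, next_nat_addr(sbi, src_off));
 *	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);	// copy whole block
 *	set_to_next_nat(nm_i, nid);			// flip the version bit
 *
 * so the previously checkpointed copy stays intact until the next checkpoint.
 */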

static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct blk_plug plug;
	struct page *page;
	pgoff_t index;
	int i;

	blk_start_plug(&plug);

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (nid >= nm_i->max_nid)
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}
		if (f2fs_readpage(sbi, page, index, READ))
			continue;

		f2fs_put_page(page, 0);
	}
	blk_finish_plug(&plug);
}

static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}

int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}

static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}

static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		BUG_ON(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * when nid is reallocated,
		 * previous nat entry can be remained in nat cache.
		 * So, reinitialize it with new information.
		 */
		e->ni = *ni;
		BUG_ON(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	BUG_ON(nat_get_blkaddr(e) != ni->blk_addr);
	BUG_ON(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	BUG_ON(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	BUG_ON(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment version no as node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}
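
/*
 * The sanity checks above encode the legal block-address transitions of a
 * cached NAT entry, roughly:
 *
 *	NULL_ADDR --> NEW_ADDR --> valid blkaddr --> NULL_ADDR
 *	(free)        (allocated,   (written)         (freed)
 *	              not written)
 *
 * i.e. a free entry must not be freed again, a just-allocated entry must not
 * be allocated again, and a written block must not go back to NEW_ADDR.
 */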

static int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}

/*
 * This function always returns success
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
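
/*
 * Lookup order above: 1) the in-memory NAT cache, 2) the NAT journal kept in
 * the hot data curseg summary, 3) the on-disk NAT block; whatever is found is
 * then inserted into the cache.  A minimal caller sketch:
 *
 *	struct node_info ni;
 *
 *	get_node_info(sbi, nid, &ni);
 *	if (ni.blk_addr == NULL_ADDR)
 *		;	// nid is not mapped to any block
 */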

/*
 * The maximum depth is four.
 * Offset[0] will have raw inode offset.
 */
static int get_node_path(long block, int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE;
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
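
/*
 * Worked example, assuming the usual on-disk constants
 * ADDRS_PER_INODE == 923 and ADDRS_PER_BLOCK == NIDS_PER_BLOCK == 1018:
 * for block == 923, the first block past the inode's direct pointers, the
 * walk above yields level == 1, offset == { NODE_DIR1_BLOCK, 0 } and
 * noffset == { 0, 1 }, i.e. slot 0 of the first direct node block.
 */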

/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, the caller should grab and release a mutex by calling mutex_lock_op()
 * and mutex_unlock_op() unless mode is set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = get_node_page(sbi, nids[0]);
	if (IS_ERR(npage[0]))
		return PTR_ERR(npage[0]);

	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i]);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
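
/*
 * A minimal caller sketch, assuming the LOOKUP_NODE mode defined alongside
 * ALLOC_NODE and LOOKUP_NODE_RA (error handling elided):
 *
 *	struct dnode_of_data dn;
 *
 *	set_new_dnode(&dn, inode, NULL, NULL, 0);
 *	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
 *	if (!err) {
 *		// dn.data_blkaddr holds the block address for 'index'
 *		f2fs_put_dnode(&dn);
 *	}
 */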

static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		BUG_ON(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	BUG_ON(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode, 1);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}

static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}

static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = (struct f2fs_node *)page_address(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
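
/*
 * Return-value convention above: on success, truncate_nodes() returns the
 * number of node pages freed underneath it, and NIDS_PER_BLOCK + 1 means the
 * whole (in)direct node was wiped out, which is what lets the caller clear
 * the corresponding nid slot in the parent.  A negative value is an error as
 * usual.
 */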

static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* reference count'll be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);
	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);
	return err;
}

/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *node_mapping = sbi->node_inode->i_mapping;
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = page_address(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (page->mapping != node_mapping) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}

/*
 * Caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
int remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return PTR_ERR(page);

	if (F2FS_I(inode)->i_xattr_nid) {
		nid_t nid = F2FS_I(inode)->i_xattr_nid;
		struct page *npage = get_node_page(sbi, nid);

		if (IS_ERR(npage))
			return PTR_ERR(npage);

		F2FS_I(inode)->i_xattr_nid = 0;
		set_new_dnode(&dn, inode, page, npage, nid);
		dn.inode_page_locked = 1;
		truncate_node(&dn);
	}

	/* 0 is possible, after f2fs_new_inode() has failed */
	BUG_ON(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
	return 0;
}

int new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct page *page;
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);
	page = new_node_page(&dn, 0);
	if (IS_ERR(page))
		return PTR_ERR(page);
	init_dent_inode(name, page);
	f2fs_put_page(page, 1);
	return 0;
}

struct page *new_node_page(struct dnode_of_data *dn, unsigned int ofs)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	get_node_info(sbi, dn->nid, &old_ni);

	SetPageUptodate(page);
	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);

	/* Reinitialize old_ni with new node page */
	BUG_ON(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;

	if (!inc_valid_node_count(sbi, dn->inode, 1)) {
		err = -ENOSPC;
		goto fail;
	}
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	set_cold_node(dn->inode, page);

	dn->node_page = page;
	sync_inode_page(dn);
	set_page_dirty(page);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}

/*
 * Caller should act on the return value:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int type)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (ni.blk_addr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_readpage(sbi, page, ni.blk_addr, type);
}

/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;
	int err;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}

struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	BUG_ON(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}

/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}

void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}

int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
					struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode,
			 * we should not skip writing node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_bio(sbi, NODE, wbc->sync_mode == WB_SYNC_ALL);

	return nwritten;
}
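
/*
 * The two call sites implied above: f2fs_write_node_pages() passes
 * ino == 0, so every dirty node page is flushed in three passes (indirect
 * nodes, then dentry dnodes, then file dnodes), while an fsync of a regular
 * file passes its real ino, which starts directly at step 2 (file dnodes are
 * cold) and writes only that inode's pages with the fsync/dentry marks set.
 */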

static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	wait_on_page_writeback(page);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	BUG_ON(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (ni.blk_addr == NULL_ADDR) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		wbc->pages_skipped++;
		set_page_dirty(page);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;
}

/*
 * It is very important to gather dirty pages and write at once, so that we can
 * submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB), a segment size, is quite reasonable.
 */
#define COLLECT_DIRTY_NODES	512
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long nr_to_write = wbc->nr_to_write;

	/* First check balancing cached NAT entries */
	if (try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK)) {
		f2fs_sync_fs(sbi->sb, true);
		return 0;
	}

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting is failed, skip writing node pages */
	wbc->nr_to_write = max_hw_blocks(sbi);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
	return 0;
}

static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}

static void f2fs_invalidate_node_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage	= f2fs_write_node_page,
	.writepages	= f2fs_write_node_pages,
	.set_page_dirty	= f2fs_set_node_page_dirty,
	.invalidatepage	= f2fs_invalidate_node_page,
	.releasepage	= f2fs_release_node_page,
};

static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;

	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}

static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return 0;

	/* 0 nid should not be used */
	if (nid == 0)
		return 0;
retry:
	i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	if (!i)
		goto retry;
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}

static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
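
/*
 * Free nid lifecycle as implemented here: add_free_nid() inserts a nid in
 * NID_NEW state, alloc_nid() below moves it to NID_ALLOC, alloc_nid_done()
 * finally drops it, and alloc_nid_failed() puts it back to NID_NEW.
 * remove_free_nid() only evicts nids that are still NID_NEW, never ones that
 * have already been handed out.
 */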

static void scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (start_nid >= nm_i->max_nid)
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		BUG_ON(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(nm_i, start_nid) < 0)
				break;
		}
	}
}

static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct free_nid *fnid, *next_fnid;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (nid >= nm_i->max_nid)
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);

	/* remove the free nids from current allocated nids */
	list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
		struct nat_entry *ne;

		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, fnid->nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			remove_free_nid(nm_i, fnid->nid);
		read_unlock(&nm_i->nat_tree_lock);
	}
}

/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid can be used as an ino as well as a nid when an inode is
 * created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !sbi->on_build_free_nids) {
		BUG_ON(list_empty(&nm_i->free_nid_list));
		list_for_each(this, &nm_i->free_nid_list) {
			i = list_entry(this, struct free_nid, list);
			if (i->state == NID_NEW)
				break;
		}

		BUG_ON(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	sbi->on_build_free_nids = 1;
	build_free_nids(sbi);
	sbi->on_build_free_nids = 0;
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
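
/*
 * A minimal caller sketch; this is the pattern used by get_dnode_of_data()
 * above:
 *
 *	nid_t nid;
 *
 *	if (!alloc_nid(sbi, &nid))
 *		return -ENOSPC;
 *	page = new_node_page(dn, noffset);
 *	if (IS_ERR(page))
 *		alloc_nid_failed(sbi, nid);	// put the nid back
 *	else
 *		alloc_nid_done(sbi, nid);	// the nid is now in use
 */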

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(i);
	spin_unlock(&nm_i->free_nid_list_lock);
}

/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	BUG_ON(!i || i->state != NID_ALLOC);
	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
		__del_from_free_nid_list(i);
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}

void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}

int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = (struct f2fs_node *)page_address(page);
	dst = (struct f2fs_node *)page_address(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);

	f2fs_put_page(ipage, 1);
	return 0;
}

int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page;
	block_t addr;
	int i, last_offset;

	/* alloc temporary page for read node */
	page = alloc_page(GFP_NOFS | __GFP_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i++, sum_entry++) {
		/*
		 * In order to read next node page,
		 * we must clear PageUptodate flag.
		 */
		ClearPageUptodate(page);

		if (f2fs_readpage(sbi, page, addr, READ_SYNC))
			goto out;

		lock_page(page);
		rn = (struct f2fs_node *)page_address(page);
		sum_entry->nid = rn->footer.nid;
		sum_entry->version = 0;
		sum_entry->ofs_in_node = 0;
		addr++;
	}
	unlock_page(page);
out:
	__free_pages(page, 0);
	return 0;
}

static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * This function is called during the checkpointing process.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sumpage */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page)
				f2fs_put_page(page, 1);

			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get nat block with dirty flag, increased reference
			 * count, mapped and lock
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		BUG_ON(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(NM_I(sbi), nid) <= 0) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}
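
/*
 * Checkpoint-time flow above: when the NAT journal in the hot data summary
 * block is full, flush_nats_in_journal() first spills the journalled entries
 * back into the NAT cache; each dirty cache entry is then written either
 * into a free journal slot or into the proper NAT block obtained via
 * get_next_nat_page(), and entries that became NULL_ADDR are recycled as
 * free nids where possible.
 */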

static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segment so divide by 2. */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		BUG_ON(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	BUG_ON(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	BUG_ON(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}