/*
 * fs/f2fs/node.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static void clear_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	unsigned long flags;

	if (PageDirty(page)) {
		spin_lock_irqsave(&mapping->tree_lock, flags);
		radix_tree_tag_clear(&mapping->page_tree,
				page_index(page),
				PAGECACHE_TAG_DIRTY);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);

		clear_page_dirty_for_io(page);
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	}
	ClearPageUptodate(page);
}
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	pgoff_t index = current_nat_addr(sbi, nid);
	return get_meta_page(sbi, index);
}
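/*
 * Each NAT block has two on-disk slots that are flipped at checkpoint
 * time (see set_to_next_nat()). Before dirtying a NAT block, copy the
 * current slot into the next one, so the checkpointed copy stays intact
 * until the next checkpoint completes.
 */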
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct page *src_page;
	struct page *dst_page;
	pgoff_t src_off;
	pgoff_t dst_off;
	void *src_addr;
	void *dst_addr;
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	src_off = current_nat_addr(sbi, nid);
	dst_off = next_nat_addr(sbi, src_off);

	/* get current nat block page with lock */
	src_page = get_meta_page(sbi, src_off);

	/* Dirty src_page means that it is already the new target NAT page. */
	if (PageDirty(src_page))
		return src_page;

	dst_page = grab_meta_page(sbi, dst_off);

	src_addr = page_address(src_page);
	dst_addr = page_address(dst_page);
	memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE);
	set_page_dirty(dst_page);
	f2fs_put_page(src_page, 1);

	set_to_next_nat(nm_i, nid);

	return dst_page;
}
/*
 * Readahead NAT pages
 */
static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
{
	struct address_space *mapping = sbi->meta_inode->i_mapping;
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct page *page;
	pgoff_t index;
	int i;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC,
		.rw_flag = REQ_META | REQ_PRIO
	};

	for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;
		index = current_nat_addr(sbi, nid);

		page = grab_cache_page(mapping, index);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			mark_page_accessed(page);
			f2fs_put_page(page, 1);
			continue;
		}
		f2fs_submit_page_mbio(sbi, page, index, &fio);
		mark_page_accessed(page);
		f2fs_put_page(page, 0);
	}
	f2fs_submit_merged_bio(sbi, META, READ);
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
	return radix_tree_lookup(&nm_i->nat_root, n);
}

static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
		nid_t start, unsigned int nr, struct nat_entry **ep)
{
	return radix_tree_gang_lookup(&nm_i->nat_root, (void **)ep, start, nr);
}

static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
	list_del(&e->list);
	radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
	nm_i->nat_cnt--;
	kmem_cache_free(nat_entry_slab, e);
}
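/*
 * A node is treated as checkpointed unless its cached NAT entry says
 * otherwise; nids with no cache entry default to "checkpointed".
 */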
int is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
	int is_cp = 1;

	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e && !e->checkpointed)
		is_cp = 0;
	read_unlock(&nm_i->nat_tree_lock);
	return is_cp;
}
static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct nat_entry *new;

	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
	if (!new)
		return NULL;
	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
		kmem_cache_free(nat_entry_slab, new);
		return NULL;
	}
	memset(new, 0, sizeof(struct nat_entry));
	nat_set_nid(new, nid);
	list_add_tail(&new->list, &nm_i->nat_entries);
	nm_i->nat_cnt++;
	return new;
}

static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
						struct f2fs_nat_entry *ne)
{
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (!e) {
		e = grab_nat_entry(nm_i, nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(e, le32_to_cpu(ne->block_addr));
		nat_set_ino(e, le32_to_cpu(ne->ino));
		nat_set_version(e, ne->version);
		e->checkpointed = true;
	}
	write_unlock(&nm_i->nat_tree_lock);
}
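/*
 * set_node_addr() records a node's new block address in the NAT cache,
 * bumps the version number when the node is being removed (new_blkaddr
 * == NULL_ADDR), and marks the entry dirty so it is flushed to the NAT
 * area at checkpoint time.
 */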
static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
			block_t new_blkaddr)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct nat_entry *e;
retry:
	write_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, ni->nid);
	if (!e) {
		e = grab_nat_entry(nm_i, ni->nid);
		if (!e) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		e->ni = *ni;
		e->checkpointed = true;
		f2fs_bug_on(ni->blk_addr == NEW_ADDR);
	} else if (new_blkaddr == NEW_ADDR) {
		/*
		 * When a nid is reallocated, the stale nat entry may remain
		 * in the nat cache, so reinitialize it with new information.
		 */
		e->ni = *ni;
		f2fs_bug_on(ni->blk_addr != NULL_ADDR);
	}

	if (new_blkaddr == NEW_ADDR)
		e->checkpointed = false;

	/* sanity check */
	f2fs_bug_on(nat_get_blkaddr(e) != ni->blk_addr);
	f2fs_bug_on(nat_get_blkaddr(e) == NULL_ADDR &&
			new_blkaddr == NULL_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) == NEW_ADDR &&
			new_blkaddr == NEW_ADDR);
	f2fs_bug_on(nat_get_blkaddr(e) != NEW_ADDR &&
			nat_get_blkaddr(e) != NULL_ADDR &&
			new_blkaddr == NEW_ADDR);

	/* increment the version no. as the node is removed */
	if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) {
		unsigned char version = nat_get_version(e);
		nat_set_version(e, inc_node_version(version));
	}

	/* change address */
	nat_set_blkaddr(e, new_blkaddr);
	__set_nat_cache_dirty(nm_i, e);
	write_unlock(&nm_i->nat_tree_lock);
}
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);

	if (nm_i->nat_cnt <= NM_WOUT_THRESHOLD)
		return 0;

	write_lock(&nm_i->nat_tree_lock);
	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
		struct nat_entry *ne;
		ne = list_first_entry(&nm_i->nat_entries,
					struct nat_entry, list);
		__del_from_nat_cache(nm_i, ne);
		nr_shrink--;
	}
	write_unlock(&nm_i->nat_tree_lock);
	return nr_shrink;
}
/*
 * This function always returns success.
 */
void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	nid_t start_nid = START_NID(nid);
	struct f2fs_nat_block *nat_blk;
	struct page *page = NULL;
	struct f2fs_nat_entry ne;
	struct nat_entry *e;
	int i;

	memset(&ne, 0, sizeof(struct f2fs_nat_entry));
	ni->nid = nid;

	/* Check nat cache */
	read_lock(&nm_i->nat_tree_lock);
	e = __lookup_nat_cache(nm_i, nid);
	if (e) {
		ni->ino = nat_get_ino(e);
		ni->blk_addr = nat_get_blkaddr(e);
		ni->version = nat_get_version(e);
	}
	read_unlock(&nm_i->nat_tree_lock);
	if (e)
		return;

	/* Check current segment summary */
	mutex_lock(&curseg->curseg_mutex);
	i = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 0);
	if (i >= 0) {
		ne = nat_in_journal(sum, i);
		node_info_from_raw_nat(ni, &ne);
	}
	mutex_unlock(&curseg->curseg_mutex);
	if (i >= 0)
		goto cache;

	/* Fill node_info from nat page */
	page = get_current_nat_page(sbi, start_nid);
	nat_blk = (struct f2fs_nat_block *)page_address(page);
	ne = nat_blk->entries[nid - start_nid];
	node_info_from_raw_nat(ni, &ne);
	f2fs_put_page(page, 1);
cache:
	/* cache nat entry */
	cache_nat_entry(NM_I(sbi), nid, &ne);
}
/*
 * The maximum depth is four.
 * Offset[0] will have the raw inode offset.
 */
static int get_node_path(struct f2fs_inode_info *fi, long block,
				int offset[4], unsigned int noffset[4])
{
	const long direct_index = ADDRS_PER_INODE(fi);
	const long direct_blks = ADDRS_PER_BLOCK;
	const long dptrs_per_blk = NIDS_PER_BLOCK;
	const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
	const long dindirect_blks = indirect_blks * NIDS_PER_BLOCK;
	int n = 0;
	int level = 0;

	noffset[0] = 0;

	if (block < direct_index) {
		offset[n] = block;
		goto got;
	}
	block -= direct_index;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR1_BLOCK;
		noffset[n] = 1;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < direct_blks) {
		offset[n++] = NODE_DIR2_BLOCK;
		noffset[n] = 2;
		offset[n] = block;
		level = 1;
		goto got;
	}
	block -= direct_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND1_BLOCK;
		noffset[n] = 3;
		offset[n++] = block / direct_blks;
		noffset[n] = 4 + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < indirect_blks) {
		offset[n++] = NODE_IND2_BLOCK;
		noffset[n] = 4 + dptrs_per_blk;
		offset[n++] = block / direct_blks;
		noffset[n] = 5 + dptrs_per_blk + offset[n - 1];
		offset[n] = block % direct_blks;
		level = 2;
		goto got;
	}
	block -= indirect_blks;
	if (block < dindirect_blks) {
		offset[n++] = NODE_DIND_BLOCK;
		noffset[n] = 5 + (dptrs_per_blk * 2);
		offset[n++] = block / indirect_blks;
		noffset[n] = 6 + (dptrs_per_blk * 2) +
			      offset[n - 1] * (dptrs_per_blk + 1);
		offset[n++] = (block / direct_blks) % dptrs_per_blk;
		noffset[n] = 7 + (dptrs_per_blk * 2) +
			      offset[n - 2] * (dptrs_per_blk + 1) +
			      offset[n - 1];
		offset[n] = block % direct_blks;
		level = 3;
		goto got;
	} else {
		BUG();
	}
got:
	return level;
}
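/*
 * Worked example (the figures below assume the common 4KB-block layout
 * where ADDRS_PER_INODE(fi) == 923 and ADDRS_PER_BLOCK == NIDS_PER_BLOCK
 * == 1018; check f2fs.h for the authoritative values):
 *   block 922  -> level 0: offset[0] = 922 (still inside the inode)
 *   block 923  -> level 1: offset = { NODE_DIR1_BLOCK, 0 }
 *   block 2959 -> level 2: offset = { NODE_IND1_BLOCK, 0, 0 }
 *                (923 direct + 2 * 1018 direct-node blocks = 2959)
 */
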
/*
 * Caller should call f2fs_put_dnode(dn).
 * Also, it should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op() only if mode is not set to RDONLY_NODE.
 * In the case of RDONLY_NODE, we don't need to care about the mutex.
 */
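/*
 * The lookup walks at most four node pages: npage[0] is the inode page and
 * npage[1..3] are the indirect/direct node pages chosen by get_node_path().
 */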
int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *npage[4];
	struct page *parent;
	int offset[4];
	unsigned int noffset[4];
	nid_t nids[4];
	int level, i;
	int err = 0;

	level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);

	nids[0] = dn->inode->i_ino;
	npage[0] = dn->inode_page;

	if (!npage[0]) {
		npage[0] = get_node_page(sbi, nids[0]);
		if (IS_ERR(npage[0]))
			return PTR_ERR(npage[0]);
	}
	parent = npage[0];
	if (level != 0)
		nids[1] = get_nid(parent, offset[0], true);
	dn->inode_page = npage[0];
	dn->inode_page_locked = true;

	/* get indirect or direct nodes */
	for (i = 1; i <= level; i++) {
		bool done = false;

		if (!nids[i] && mode == ALLOC_NODE) {
			/* alloc new node */
			if (!alloc_nid(sbi, &(nids[i]))) {
				err = -ENOSPC;
				goto release_pages;
			}

			dn->nid = nids[i];
			npage[i] = new_node_page(dn, noffset[i], NULL);
			if (IS_ERR(npage[i])) {
				alloc_nid_failed(sbi, nids[i]);
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}

			set_nid(parent, offset[i - 1], nids[i], i == 1);
			alloc_nid_done(sbi, nids[i]);
			done = true;
		} else if (mode == LOOKUP_NODE_RA && i == level && level > 1) {
			npage[i] = get_node_page_ra(parent, offset[i - 1]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				goto release_pages;
			}
			done = true;
		}
		if (i == 1) {
			dn->inode_page_locked = false;
			unlock_page(parent);
		} else {
			f2fs_put_page(parent, 1);
		}

		if (!done) {
			npage[i] = get_node_page(sbi, nids[i]);
			if (IS_ERR(npage[i])) {
				err = PTR_ERR(npage[i]);
				f2fs_put_page(npage[0], 0);
				goto release_out;
			}
		}
		if (i < level) {
			parent = npage[i];
			nids[i + 1] = get_nid(parent, offset[i], false);
		}
	}
	dn->nid = nids[level];
	dn->ofs_in_node = offset[level];
	dn->node_page = npage[level];
	dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
	return 0;

release_pages:
	f2fs_put_page(parent, 1);
	if (i > 1)
		f2fs_put_page(npage[0], 0);
release_out:
	dn->inode_page = NULL;
	dn->node_page = NULL;
	return err;
}
static void truncate_node(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct node_info ni;

	get_node_info(sbi, dn->nid, &ni);
	if (dn->inode->i_blocks == 0) {
		f2fs_bug_on(ni.blk_addr != NULL_ADDR);
		goto invalidate;
	}
	f2fs_bug_on(ni.blk_addr == NULL_ADDR);

	/* Deallocate node address */
	invalidate_blocks(sbi, ni.blk_addr);
	dec_valid_node_count(sbi, dn->inode);
	set_node_addr(sbi, &ni, NULL_ADDR);

	if (dn->nid == dn->inode->i_ino) {
		remove_orphan_inode(sbi, dn->nid);
		dec_valid_inode_count(sbi);
	} else {
		sync_inode_page(dn);
	}
invalidate:
	clear_node_page_dirty(dn->node_page);
	F2FS_SET_SB_DIRT(sbi);

	f2fs_put_page(dn->node_page, 1);
	dn->node_page = NULL;
	trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
}
static int truncate_dnode(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *page;

	if (dn->nid == 0)
		return 1;

	/* get direct node */
	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page) && PTR_ERR(page) == -ENOENT)
		return 1;
	else if (IS_ERR(page))
		return PTR_ERR(page);

	/* Make dnode_of_data for parameter */
	dn->node_page = page;
	dn->ofs_in_node = 0;
	truncate_data_blocks(dn);
	truncate_node(dn);
	return 1;
}
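/*
 * truncate_nodes() frees the subtree below an indirect node. The return
 * value is the number of node offsets covered (the caller advances nofs
 * by it) or a negative error; a zero nid reports the full span of
 * NIDS_PER_BLOCK + 1 as already freed.
 */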
static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
						int ofs, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct dnode_of_data rdn = *dn;
	struct page *page;
	struct f2fs_node *rn;
	nid_t child_nid;
	unsigned int child_nofs;
	int freed = 0;
	int i, ret;

	if (dn->nid == 0)
		return NIDS_PER_BLOCK + 1;

	trace_f2fs_truncate_nodes_enter(dn->inode, dn->nid, dn->data_blkaddr);

	page = get_node_page(sbi, dn->nid);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_nodes_exit(dn->inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	rn = F2FS_NODE(page);
	if (depth < 3) {
		for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0)
				continue;
			rdn.nid = child_nid;
			ret = truncate_dnode(&rdn);
			if (ret < 0)
				goto out_err;
			set_nid(page, i, 0, false);
		}
	} else {
		child_nofs = nofs + ofs * (NIDS_PER_BLOCK + 1) + 1;
		for (i = ofs; i < NIDS_PER_BLOCK; i++) {
			child_nid = le32_to_cpu(rn->in.nid[i]);
			if (child_nid == 0) {
				child_nofs += NIDS_PER_BLOCK + 1;
				continue;
			}
			rdn.nid = child_nid;
			ret = truncate_nodes(&rdn, child_nofs, 0, depth - 1);
			if (ret == (NIDS_PER_BLOCK + 1)) {
				set_nid(page, i, 0, false);
				child_nofs += ret;
			} else if (ret < 0 && ret != -ENOENT) {
				goto out_err;
			}
		}
		freed = child_nofs;
	}

	if (!ofs) {
		/* remove current indirect node */
		dn->node_page = page;
		truncate_node(dn);
		freed++;
	} else {
		f2fs_put_page(page, 1);
	}
	trace_f2fs_truncate_nodes_exit(dn->inode, freed);
	return freed;

out_err:
	f2fs_put_page(page, 1);
	trace_f2fs_truncate_nodes_exit(dn->inode, ret);
	return ret;
}
static int truncate_partial_nodes(struct dnode_of_data *dn,
			struct f2fs_inode *ri, int *offset, int depth)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct page *pages[2];
	nid_t nid[3];
	nid_t child_nid;
	int err = 0;
	int i;
	int idx = depth - 2;

	nid[0] = le32_to_cpu(ri->i_nid[offset[0] - NODE_DIR1_BLOCK]);
	if (!nid[0])
		return 0;

	/* get indirect nodes in the path */
	for (i = 0; i < depth - 1; i++) {
		/* reference count will be increased */
		pages[i] = get_node_page(sbi, nid[i]);
		if (IS_ERR(pages[i])) {
			depth = i + 1;
			err = PTR_ERR(pages[i]);
			goto fail;
		}
		nid[i + 1] = get_nid(pages[i], offset[i + 1], false);
	}

	/* free direct nodes linked to a partial indirect node */
	for (i = offset[depth - 1]; i < NIDS_PER_BLOCK; i++) {
		child_nid = get_nid(pages[idx], i, false);
		if (!child_nid)
			continue;
		dn->nid = child_nid;
		err = truncate_dnode(dn);
		if (err < 0)
			goto fail;
		set_nid(pages[idx], i, 0, false);
	}

	if (offset[depth - 1] == 0) {
		dn->node_page = pages[idx];
		dn->nid = nid[idx];
		truncate_node(dn);
	} else {
		f2fs_put_page(pages[idx], 1);
	}
	offset[idx]++;
	offset[depth - 1] = 0;
fail:
	for (i = depth - 3; i >= 0; i--)
		f2fs_put_page(pages[i], 1);

	trace_f2fs_truncate_partial_nodes(dn->inode, nid, depth, err);

	return err;
}
/*
 * All the block addresses of data and nodes should be nullified.
 */
int truncate_inode_blocks(struct inode *inode, pgoff_t from)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *node_mapping = sbi->node_inode->i_mapping;
	int err = 0, cont = 1;
	int level, offset[4], noffset[4];
	unsigned int nofs = 0;
	struct f2fs_node *rn;
	struct dnode_of_data dn;
	struct page *page;

	trace_f2fs_truncate_inode_blocks_enter(inode, from);

	level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
	page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(page)) {
		trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page));
		return PTR_ERR(page);
	}

	set_new_dnode(&dn, inode, page, NULL, 0);
	unlock_page(page);

	rn = F2FS_NODE(page);
	switch (level) {
	case 0:
	case 1:
		nofs = noffset[1];
		break;
	case 2:
		nofs = noffset[1];
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		nofs += 1 + NIDS_PER_BLOCK;
		break;
	case 3:
		nofs = 5 + 2 * NIDS_PER_BLOCK;
		if (!offset[level - 1])
			goto skip_partial;
		err = truncate_partial_nodes(&dn, &rn->i, offset, level);
		if (err < 0 && err != -ENOENT)
			goto fail;
		break;
	default:
		BUG();
	}

skip_partial:
	while (cont) {
		dn.nid = le32_to_cpu(rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]);
		switch (offset[0]) {
		case NODE_DIR1_BLOCK:
		case NODE_DIR2_BLOCK:
			err = truncate_dnode(&dn);
			break;

		case NODE_IND1_BLOCK:
		case NODE_IND2_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 2);
			break;

		case NODE_DIND_BLOCK:
			err = truncate_nodes(&dn, nofs, offset[1], 3);
			cont = 0;
			break;

		default:
			BUG();
		}
		if (err < 0 && err != -ENOENT)
			goto fail;
		if (offset[1] == 0 &&
				rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK]) {
			lock_page(page);
			if (unlikely(page->mapping != node_mapping)) {
				f2fs_put_page(page, 1);
				goto restart;
			}
			wait_on_page_writeback(page);
			rn->i.i_nid[offset[0] - NODE_DIR1_BLOCK] = 0;
			set_page_dirty(page);
			unlock_page(page);
		}
		offset[1] = 0;
		offset[0]++;
		nofs += err;
	}
fail:
	f2fs_put_page(page, 0);
	trace_f2fs_truncate_inode_blocks_exit(inode, err);
	return err > 0 ? 0 : err;
}
int truncate_xattr_node(struct inode *inode, struct page *page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	nid_t nid = F2FS_I(inode)->i_xattr_nid;
	struct dnode_of_data dn;
	struct page *npage;

	if (!nid)
		return 0;

	npage = get_node_page(sbi, nid);
	if (IS_ERR(npage))
		return PTR_ERR(npage);

	F2FS_I(inode)->i_xattr_nid = 0;

	/* need to do checkpoint during fsync */
	F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));

	set_new_dnode(&dn, inode, page, npage, nid);

	if (page)
		dn.inode_page_locked = true;
	truncate_node(&dn);
	return 0;
}
/*
 * Caller should grab and release a mutex by calling mutex_lock_op() and
 * mutex_unlock_op().
 */
void remove_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	nid_t ino = inode->i_ino;
	struct dnode_of_data dn;

	page = get_node_page(sbi, ino);
	if (IS_ERR(page))
		return;

	if (truncate_xattr_node(inode, page)) {
		f2fs_put_page(page, 1);
		return;
	}
	/* 0 is possible, after f2fs_new_inode() is failed */
	f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1);
	set_new_dnode(&dn, inode, page, page, ino);
	truncate_node(&dn);
}
struct page *new_inode_page(struct inode *inode, const struct qstr *name)
{
	struct dnode_of_data dn;

	/* allocate inode page for new inode */
	set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino);

	/* caller should f2fs_put_page(page, 1); */
	return new_node_page(&dn, 0, NULL);
}
struct page *new_node_page(struct dnode_of_data *dn,
				unsigned int ofs, struct page *ipage)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct node_info old_ni, new_ni;
	struct page *page;
	int err;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return ERR_PTR(-EPERM);

	page = grab_cache_page(mapping, dn->nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (unlikely(!inc_valid_node_count(sbi, dn->inode))) {
		err = -ENOSPC;
		goto fail;
	}

	get_node_info(sbi, dn->nid, &old_ni);

	/* Reinitialize old_ni with new node page */
	f2fs_bug_on(old_ni.blk_addr != NULL_ADDR);
	new_ni = old_ni;
	new_ni.ino = dn->inode->i_ino;
	set_node_addr(sbi, &new_ni, NEW_ADDR);

	fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
	set_cold_node(dn->inode, page);
	SetPageUptodate(page);
	set_page_dirty(page);

	if (ofs == XATTR_NODE_OFFSET)
		F2FS_I(dn->inode)->i_xattr_nid = dn->nid;

	dn->node_page = page;
	if (ipage)
		update_inode(dn->inode, ipage);
	else
		sync_inode_page(dn);
	if (ofs == 0)
		inc_valid_inode_count(sbi);

	return page;

fail:
	clear_node_page_dirty(page);
	f2fs_put_page(page, 1);
	return ERR_PTR(err);
}
/*
 * Caller should act on the return value as follows:
 * 0: f2fs_put_page(page, 0)
 * LOCKED_PAGE: f2fs_put_page(page, 1)
 * error: nothing
 */
static int read_node_page(struct page *page, int rw)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	struct node_info ni;

	get_node_info(sbi, page->index, &ni);

	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return -ENOENT;
	}

	if (PageUptodate(page))
		return LOCKED_PAGE;

	return f2fs_submit_page_bio(sbi, page, ni.blk_addr, rw);
}
/*
 * Readahead a node page
 */
void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *apage;
	int err;

	apage = find_get_page(mapping, nid);
	if (apage && PageUptodate(apage)) {
		f2fs_put_page(apage, 0);
		return;
	}
	f2fs_put_page(apage, 0);

	apage = grab_cache_page(mapping, nid);
	if (!apage)
		return;

	err = read_node_page(apage, READA);
	if (err == 0)
		f2fs_put_page(apage, 0);
	else if (err == LOCKED_PAGE)
		f2fs_put_page(apage, 1);
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto got_it;

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
got_it:
	f2fs_bug_on(nid != nid_of_node(page));
	mark_page_accessed(page);
	return page;
}
/*
 * Return a locked page for the desired node page.
 * And, readahead MAX_RA_NODE number of node pages.
 */
struct page *get_node_page_ra(struct page *parent, int start)
{
	struct f2fs_sb_info *sbi = F2FS_SB(parent->mapping->host->i_sb);
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct blk_plug plug;
	struct page *page;
	int err, i, end;
	nid_t nid;

	/* First, try getting the desired direct node. */
	nid = get_nid(parent, start, false);
	if (!nid)
		return ERR_PTR(-ENOENT);
repeat:
	page = grab_cache_page(mapping, nid);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = read_node_page(page, READ_SYNC);
	if (err < 0)
		return ERR_PTR(err);
	else if (err == LOCKED_PAGE)
		goto page_hit;

	blk_start_plug(&plug);

	/* Then, try readahead for siblings of the desired node */
	end = start + MAX_RA_NODE;
	end = min(end, NIDS_PER_BLOCK);
	for (i = start + 1; i < end; i++) {
		nid = get_nid(parent, i, false);
		if (!nid)
			continue;
		ra_node_page(sbi, nid);
	}

	blk_finish_plug(&plug);

	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
page_hit:
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	mark_page_accessed(page);
	return page;
}
void sync_inode_page(struct dnode_of_data *dn)
{
	if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
		update_inode(dn->inode, dn->node_page);
	} else if (dn->inode_page) {
		if (!dn->inode_page_locked)
			lock_page(dn->inode_page);
		update_inode(dn->inode, dn->inode_page);
		if (!dn->inode_page_locked)
			unlock_page(dn->inode_page);
	} else {
		update_inode_page(dn->inode);
	}
}
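/*
 * sync_node_pages() flushes dirty node pages in three passes: indirect
 * nodes (step 0), then dentry dnodes (step 1), then file dnodes (step 2).
 * When ino is given (the fsync path), it starts directly at step 2 and
 * writes only that inode's pages, setting fsync/dentry marks in the
 * node footers on the way out.
 */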
int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
			struct writeback_control *wbc)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index, end;
	struct pagevec pvec;
	int step = ino ? 2 : 0;
	int nwritten = 0, wrote = 0;

	pagevec_init(&pvec, 0);

next_step:
	index = 0;
	end = LONG_MAX;

	while (index <= end) {
		int i, nr_pages;
		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY,
				min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * flushing sequence with step:
			 * 0. indirect nodes
			 * 1. dentry dnodes
			 * 2. file dnodes
			 */
			if (step == 0 && IS_DNODE(page))
				continue;
			if (step == 1 && (!IS_DNODE(page) ||
						is_cold_node(page)))
				continue;
			if (step == 2 && (!IS_DNODE(page) ||
						!is_cold_node(page)))
				continue;

			/*
			 * In fsync mode, we should not skip writing
			 * node pages.
			 */
			if (ino && ino_of_node(page) == ino)
				lock_page(page);
			else if (!trylock_page(page))
				continue;

			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (ino && ino_of_node(page) != ino)
				goto continue_unlock;

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* called by fsync() */
			if (ino && IS_DNODE(page)) {
				int mark = !is_checkpointed_node(sbi, ino);
				set_fsync_mark(page, 1);
				if (IS_INODE(page))
					set_dentry_mark(page, mark);
				nwritten++;
			} else {
				set_fsync_mark(page, 0);
				set_dentry_mark(page, 0);
			}
			mapping->a_ops->writepage(page, wbc);
			wrote++;

			if (--wbc->nr_to_write == 0)
				break;
		}
		pagevec_release(&pvec);
		cond_resched();

		if (wbc->nr_to_write == 0) {
			step = 2;
			break;
		}
	}

	if (step < 2) {
		step++;
		goto next_step;
	}

	if (wrote)
		f2fs_submit_merged_bio(sbi, NODE, WRITE);
	return nwritten;
}
int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	pgoff_t index = 0, end = LONG_MAX;
	struct pagevec pvec;
	int nr_pages;
	int ret2 = 0, ret = 0;

	pagevec_init(&pvec, 0);
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (unlikely(page->index > end))
				continue;

			if (ino && ino_of_node(page) == ino) {
				wait_on_page_writeback(page);
				if (TestClearPageError(page))
					ret = -EIO;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (unlikely(test_and_clear_bit(AS_ENOSPC, &mapping->flags)))
		ret2 = -ENOSPC;
	if (unlikely(test_and_clear_bit(AS_EIO, &mapping->flags)))
		ret2 = -EIO;
	if (!ret)
		ret = ret2;
	return ret;
}
static int f2fs_write_node_page(struct page *page,
				struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
	nid_t nid;
	block_t new_addr;
	struct node_info ni;

	if (unlikely(sbi->por_doing))
		goto redirty_out;

	wait_on_page_writeback(page);

	/* get old block addr of this node page */
	nid = nid_of_node(page);
	f2fs_bug_on(page->index != nid);

	get_node_info(sbi, nid, &ni);

	/* This page is already truncated */
	if (unlikely(ni.blk_addr == NULL_ADDR)) {
		dec_page_count(sbi, F2FS_DIRTY_NODES);
		unlock_page(page);
		return 0;
	}

	if (wbc->for_reclaim)
		goto redirty_out;

	mutex_lock(&sbi->node_write);
	set_page_writeback(page);
	write_node_page(sbi, page, nid, ni.blk_addr, &new_addr);
	set_node_addr(sbi, &ni, new_addr);
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	mutex_unlock(&sbi->node_write);
	unlock_page(page);
	return 0;

redirty_out:
	dec_page_count(sbi, F2FS_DIRTY_NODES);
	wbc->pages_skipped++;
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}
/*
 * It is very important to gather dirty pages and write them at once, so
 * that we can submit a big bio without interfering with other data writes.
 * By default, 512 pages (2MB) * 3 node types is more reasonable.
 */
#define COLLECT_DIRTY_NODES	1536
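/*
 * A sketch of the arithmetic (assuming 4KB pages): 1536 = 512 pages for
 * each of the three node log types, i.e. 2MB of dirty node pages per
 * type before writeback proceeds.
 */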
static int f2fs_write_node_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);
	long nr_to_write = wbc->nr_to_write;

	/* balancing f2fs's metadata in background */
	f2fs_balance_fs_bg(sbi);

	/* collect a number of dirty node pages and write together */
	if (get_pages(sbi, F2FS_DIRTY_NODES) < COLLECT_DIRTY_NODES)
		return 0;

	/* if mounting is failed, skip writing node pages */
	wbc->nr_to_write = 3 * max_hw_blocks(sbi);
	sync_node_pages(sbi, 0, wbc);
	wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) -
						wbc->nr_to_write);
	return 0;
}
static int f2fs_set_node_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct f2fs_sb_info *sbi = F2FS_SB(mapping->host->i_sb);

	trace_f2fs_set_page_dirty(page, NODE);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		inc_page_count(sbi, F2FS_DIRTY_NODES);
		SetPagePrivate(page);
		return 1;
	}
	return 0;
}
static void f2fs_invalidate_node_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (PageDirty(page))
		dec_page_count(sbi, F2FS_DIRTY_NODES);
	ClearPagePrivate(page);
}

static int f2fs_release_node_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
/*
 * Structure of the f2fs node operations
 */
const struct address_space_operations f2fs_node_aops = {
	.writepage = f2fs_write_node_page,
	.writepages = f2fs_write_node_pages,
	.set_page_dirty = f2fs_set_node_page_dirty,
	.invalidatepage = f2fs_invalidate_node_page,
	.releasepage = f2fs_release_node_page,
};
static struct free_nid *__lookup_free_nid_list(nid_t n, struct list_head *head)
{
	struct list_head *this;
	struct free_nid *i;
	list_for_each(this, head) {
		i = list_entry(this, struct free_nid, list);
		if (i->nid == n)
			return i;
	}
	return NULL;
}

static void __del_from_free_nid_list(struct free_nid *i)
{
	list_del(&i->list);
	kmem_cache_free(free_nid_slab, i);
}
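/*
 * add_free_nid() returns 1 when the nid was added to the free list, 0
 * when it was skipped (nid 0, already allocated, or already listed),
 * and -1 when the free list is already over-populated.
 */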
static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
{
	struct free_nid *i;
	struct nat_entry *ne;
	bool allocated = false;

	if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
		return -1;

	/* 0 nid should not be used */
	if (unlikely(nid == 0))
		return 0;

	if (build) {
		/* do not add allocated nids */
		read_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
			allocated = true;
		read_unlock(&nm_i->nat_tree_lock);
		if (allocated)
			return 0;
	}

	i = f2fs_kmem_cache_alloc(free_nid_slab, GFP_NOFS);
	i->nid = nid;
	i->state = NID_NEW;

	spin_lock(&nm_i->free_nid_list_lock);
	if (__lookup_free_nid_list(nid, &nm_i->free_nid_list)) {
		spin_unlock(&nm_i->free_nid_list_lock);
		kmem_cache_free(free_nid_slab, i);
		return 0;
	}
	list_add_tail(&i->list, &nm_i->free_nid_list);
	nm_i->fcnt++;
	spin_unlock(&nm_i->free_nid_list_lock);
	return 1;
}
static void remove_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
{
	struct free_nid *i;
	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	if (i && i->state == NID_NEW) {
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
static void scan_nat_page(struct f2fs_nm_info *nm_i,
			struct page *nat_page, nid_t start_nid)
{
	struct f2fs_nat_block *nat_blk = page_address(nat_page);
	block_t blk_addr;
	int i;

	i = start_nid % NAT_ENTRY_PER_BLOCK;

	for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {

		if (unlikely(start_nid >= nm_i->max_nid))
			break;

		blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
		f2fs_bug_on(blk_addr == NEW_ADDR);
		if (blk_addr == NULL_ADDR) {
			if (add_free_nid(nm_i, start_nid, true) < 0)
				break;
		}
	}
}
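/*
 * build_free_nids() refills the free nid list by scanning FREE_NID_PAGES
 * NAT pages starting at next_scan_nid, then reconciling the result with
 * the NAT entries still sitting in the current summary journal.
 */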
static void build_free_nids(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i = 0;
	nid_t nid = nm_i->next_scan_nid;

	/* Enough entries */
	if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
		return;

	/* readahead nat pages to be scanned */
	ra_nat_pages(sbi, nid);

	while (1) {
		struct page *page = get_current_nat_page(sbi, nid);

		scan_nat_page(nm_i, page, nid);
		f2fs_put_page(page, 1);

		nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
		if (unlikely(nid >= nm_i->max_nid))
			nid = 0;

		if (i++ == FREE_NID_PAGES)
			break;
	}

	/* go to the next free nat pages to find free nids abundantly */
	nm_i->next_scan_nid = nid;

	/* find free nids from current sum_pages */
	mutex_lock(&curseg->curseg_mutex);
	for (i = 0; i < nats_in_cursum(sum); i++) {
		block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
		nid = le32_to_cpu(nid_in_journal(sum, i));
		if (addr == NULL_ADDR)
			add_free_nid(nm_i, nid, true);
		else
			remove_free_nid(nm_i, nid);
	}
	mutex_unlock(&curseg->curseg_mutex);
}
/*
 * If this function returns success, the caller can obtain a new nid
 * from the second parameter of this function.
 * The returned nid could be used as an ino as well as a nid when an
 * inode is created.
 */
bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i = NULL;
	struct list_head *this;
retry:
	if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
		return false;

	spin_lock(&nm_i->free_nid_list_lock);

	/* We should not use stale free nids created by build_free_nids */
	if (nm_i->fcnt && !sbi->on_build_free_nids) {
		f2fs_bug_on(list_empty(&nm_i->free_nid_list));
		list_for_each(this, &nm_i->free_nid_list) {
			i = list_entry(this, struct free_nid, list);
			if (i->state == NID_NEW)
				break;
		}

		f2fs_bug_on(i->state != NID_NEW);
		*nid = i->nid;
		i->state = NID_ALLOC;
		nm_i->fcnt--;
		spin_unlock(&nm_i->free_nid_list_lock);
		return true;
	}
	spin_unlock(&nm_i->free_nid_list_lock);

	/* Let's scan nat pages and its caches to get free nids */
	mutex_lock(&nm_i->build_lock);
	sbi->on_build_free_nids = true;
	build_free_nids(sbi);
	sbi->on_build_free_nids = false;
	mutex_unlock(&nm_i->build_lock);
	goto retry;
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	__del_from_free_nid_list(i);
	spin_unlock(&nm_i->free_nid_list_lock);
}
/*
 * alloc_nid() should be called prior to this function.
 */
void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i;

	if (!nid)
		return;

	spin_lock(&nm_i->free_nid_list_lock);
	i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
	f2fs_bug_on(!i || i->state != NID_ALLOC);
	if (nm_i->fcnt > 2 * MAX_FREE_NIDS) {
		__del_from_free_nid_list(i);
	} else {
		i->state = NID_NEW;
		nm_i->fcnt++;
	}
	spin_unlock(&nm_i->free_nid_list_lock);
}
void recover_node_page(struct f2fs_sb_info *sbi, struct page *page,
		struct f2fs_summary *sum, struct node_info *ni,
		block_t new_blkaddr)
{
	rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr);
	set_node_addr(sbi, ni, new_blkaddr);
	clear_node_page_dirty(page);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
{
	struct address_space *mapping = sbi->node_inode->i_mapping;
	struct f2fs_node *src, *dst;
	nid_t ino = ino_of_node(page);
	struct node_info old_ni, new_ni;
	struct page *ipage;

	ipage = grab_cache_page(mapping, ino);
	if (!ipage)
		return -ENOMEM;

	/* Should not use this inode from free nid list */
	remove_free_nid(NM_I(sbi), ino);

	get_node_info(sbi, ino, &old_ni);
	SetPageUptodate(ipage);
	fill_node_footer(ipage, ino, ino, 0, true);

	src = F2FS_NODE(page);
	dst = F2FS_NODE(ipage);

	memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
	dst->i.i_size = 0;
	dst->i.i_blocks = cpu_to_le64(1);
	dst->i.i_links = cpu_to_le32(1);
	dst->i.i_xattr_nid = 0;

	new_ni = old_ni;
	new_ni.ino = ino;

	if (unlikely(!inc_valid_node_count(sbi, NULL)))
		WARN_ON(1);
	set_node_addr(sbi, &new_ni, NEW_ADDR);
	inc_valid_inode_count(sbi);
	f2fs_put_page(ipage, 1);
	return 0;
}
/*
 * ra_sum_pages() merges contiguous pages into one bio and submits it.
 * These pre-read pages are linked in the pages list.
 */
static int ra_sum_pages(struct f2fs_sb_info *sbi, struct list_head *pages,
				int start, int nrpages)
{
	struct page *page;
	int page_idx = start;
	struct f2fs_io_info fio = {
		.type = META,
		.rw = READ_SYNC,
		.rw_flag = REQ_META | REQ_PRIO
	};

	for (; page_idx < start + nrpages; page_idx++) {
		/* alloc temporary page to read node summary info */
		page = alloc_page(GFP_F2FS_ZERO);
		if (!page) {
			struct page *tmp;
			list_for_each_entry_safe(page, tmp, pages, lru) {
				list_del(&page->lru);
				unlock_page(page);
				__free_pages(page, 0);
			}
			return -ENOMEM;
		}

		lock_page(page);
		page->index = page_idx;
		list_add_tail(&page->lru, pages);
	}

	list_for_each_entry(page, pages, lru)
		f2fs_submit_page_mbio(sbi, page, page->index, &fio);

	f2fs_submit_merged_bio(sbi, META, READ);
	return 0;
}
int restore_node_summary(struct f2fs_sb_info *sbi,
			unsigned int segno, struct f2fs_summary_block *sum)
{
	struct f2fs_node *rn;
	struct f2fs_summary *sum_entry;
	struct page *page, *tmp;
	block_t addr;
	int bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
	int i, last_offset, nrpages, err = 0;
	LIST_HEAD(page_list);

	/* scan the node segment */
	last_offset = sbi->blocks_per_seg;
	addr = START_BLOCK(sbi, segno);
	sum_entry = &sum->entries[0];

	for (i = 0; i < last_offset; i += nrpages, addr += nrpages) {
		nrpages = min(last_offset - i, bio_blocks);

		/* read ahead node pages */
		err = ra_sum_pages(sbi, &page_list, addr, nrpages);
		if (err)
			return err;

		list_for_each_entry_safe(page, tmp, &page_list, lru) {
			lock_page(page);
			if (unlikely(!PageUptodate(page))) {
				err = -EIO;
			} else {
				rn = F2FS_NODE(page);
				sum_entry->nid = rn->footer.nid;
				sum_entry->version = 0;
				sum_entry->ofs_in_node = 0;
				sum_entry++;
			}

			list_del(&page->lru);
			unlock_page(page);
			__free_pages(page, 0);
		}
	}
	return err;
}

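/*
 * If the NAT journal in the current summary block is full, move all of
 * its entries into the dirty NAT cache so that the following flush
 * writes them into NAT blocks instead. Returns true when the journal
 * has been drained.
 */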
static bool flush_nats_in_journal(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	int i;

	mutex_lock(&curseg->curseg_mutex);

	if (nats_in_cursum(sum) < NAT_JOURNAL_ENTRIES) {
		mutex_unlock(&curseg->curseg_mutex);
		return false;
	}

	for (i = 0; i < nats_in_cursum(sum); i++) {
		struct nat_entry *ne;
		struct f2fs_nat_entry raw_ne;
		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));

		raw_ne = nat_in_journal(sum, i);
retry:
		write_lock(&nm_i->nat_tree_lock);
		ne = __lookup_nat_cache(nm_i, nid);
		if (ne) {
			__set_nat_cache_dirty(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
			continue;
		}
		ne = grab_nat_entry(nm_i, nid);
		if (!ne) {
			write_unlock(&nm_i->nat_tree_lock);
			goto retry;
		}
		nat_set_blkaddr(ne, le32_to_cpu(raw_ne.block_addr));
		nat_set_ino(ne, le32_to_cpu(raw_ne.ino));
		nat_set_version(ne, raw_ne.version);
		__set_nat_cache_dirty(nm_i, ne);
		write_unlock(&nm_i->nat_tree_lock);
	}
	update_nats_in_cursum(sum, -i);
	mutex_unlock(&curseg->curseg_mutex);
	return true;
}

/*
 * Called during the checkpointing process: write back every dirty NAT
 * cache entry, either into the NAT journal kept in the current summary
 * block or directly into its on-disk NAT block.
 */
void flush_nat_entries(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	struct f2fs_summary_block *sum = curseg->sum_blk;
	struct list_head *cur, *n;
	struct page *page = NULL;
	struct f2fs_nat_block *nat_blk = NULL;
	nid_t start_nid = 0, end_nid = 0;
	bool flushed;

	flushed = flush_nats_in_journal(sbi);

	if (!flushed)
		mutex_lock(&curseg->curseg_mutex);

	/* 1) flush dirty nat caches */
	list_for_each_safe(cur, n, &nm_i->dirty_nat_entries) {
		struct nat_entry *ne;
		nid_t nid;
		struct f2fs_nat_entry raw_ne;
		int offset = -1;
		block_t new_blkaddr;

		ne = list_entry(cur, struct nat_entry, list);
		nid = nat_get_nid(ne);

		if (nat_get_blkaddr(ne) == NEW_ADDR)
			continue;
		if (flushed)
			goto to_nat_page;

		/* if there is room for nat entries in curseg->sumpage */
		offset = lookup_journal_in_cursum(sum, NAT_JOURNAL, nid, 1);
		if (offset >= 0) {
			raw_ne = nat_in_journal(sum, offset);
			goto flush_now;
		}
to_nat_page:
		if (!page || (start_nid > nid || nid > end_nid)) {
			if (page) {
				f2fs_put_page(page, 1);
				page = NULL;
			}
			start_nid = START_NID(nid);
			end_nid = start_nid + NAT_ENTRY_PER_BLOCK - 1;

			/*
			 * get nat block with dirty flag, increased reference
			 * count, mapped and locked
			 */
			page = get_next_nat_page(sbi, start_nid);
			nat_blk = page_address(page);
		}

		f2fs_bug_on(!nat_blk);
		raw_ne = nat_blk->entries[nid - start_nid];
flush_now:
		new_blkaddr = nat_get_blkaddr(ne);

		raw_ne.ino = cpu_to_le32(nat_get_ino(ne));
		raw_ne.block_addr = cpu_to_le32(new_blkaddr);
		raw_ne.version = nat_get_version(ne);

		if (offset < 0) {
			nat_blk->entries[nid - start_nid] = raw_ne;
		} else {
			nat_in_journal(sum, offset) = raw_ne;
			nid_in_journal(sum, offset) = cpu_to_le32(nid);
		}

		if (nat_get_blkaddr(ne) == NULL_ADDR &&
				add_free_nid(NM_I(sbi), nid, false) <= 0) {
			write_lock(&nm_i->nat_tree_lock);
			__del_from_nat_cache(nm_i, ne);
			write_unlock(&nm_i->nat_tree_lock);
		} else {
			write_lock(&nm_i->nat_tree_lock);
			__clear_nat_cache_dirty(nm_i, ne);
			ne->checkpointed = true;
			write_unlock(&nm_i->nat_tree_lock);
		}
	}
	if (!flushed)
		mutex_unlock(&curseg->curseg_mutex);
	f2fs_put_page(page, 1);

	/* 2) shrink nat caches if necessary */
	try_to_free_nats(sbi, nm_i->nat_cnt - NM_WOUT_THRESHOLD);
}

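/*
 * Derive the node manager geometry from the raw superblock, initialize
 * its lists and locks, and take a private copy of the NAT version
 * bitmap from the checkpoint.
 */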
static int init_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi);
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	unsigned char *version_bitmap;
	unsigned int nat_segs, nat_blocks;

	nm_i->nat_blkaddr = le32_to_cpu(sb_raw->nat_blkaddr);

	/* segment_count_nat includes pair segments, so divide by 2 */
	nat_segs = le32_to_cpu(sb_raw->segment_count_nat) >> 1;
	nat_blocks = nat_segs << le32_to_cpu(sb_raw->log_blocks_per_seg);
	nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks;
	nm_i->fcnt = 0;
	nm_i->nat_cnt = 0;

	INIT_LIST_HEAD(&nm_i->free_nid_list);
	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
	INIT_LIST_HEAD(&nm_i->nat_entries);
	INIT_LIST_HEAD(&nm_i->dirty_nat_entries);

	mutex_init(&nm_i->build_lock);
	spin_lock_init(&nm_i->free_nid_list_lock);
	rwlock_init(&nm_i->nat_tree_lock);

	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
	version_bitmap = __bitmap_ptr(sbi, NAT_BITMAP);
	if (!version_bitmap)
		return -EFAULT;

	nm_i->nat_bitmap = kmemdup(version_bitmap, nm_i->bitmap_size,
					GFP_KERNEL);
	if (!nm_i->nat_bitmap)
		return -ENOMEM;
	return 0;
}

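/*
 * Allocate and initialize the node manager, then pre-build an initial
 * batch of free nids.
 */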
int build_node_manager(struct f2fs_sb_info *sbi)
{
	int err;

	sbi->nm_info = kzalloc(sizeof(struct f2fs_nm_info), GFP_KERNEL);
	if (!sbi->nm_info)
		return -ENOMEM;

	err = init_node_manager(sbi);
	if (err)
		return err;

	build_free_nids(sbi);
	return 0;
}

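/*
 * Tear down the node manager: drop every cached free nid and NAT entry,
 * then free the NAT bitmap copy and the manager structure itself.
 */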
void destroy_node_manager(struct f2fs_sb_info *sbi)
{
	struct f2fs_nm_info *nm_i = NM_I(sbi);
	struct free_nid *i, *next_i;
	struct nat_entry *natvec[NATVEC_SIZE];
	nid_t nid = 0;
	unsigned int found;

	if (!nm_i)
		return;

	/* destroy free nid list */
	spin_lock(&nm_i->free_nid_list_lock);
	list_for_each_entry_safe(i, next_i, &nm_i->free_nid_list, list) {
		f2fs_bug_on(i->state == NID_ALLOC);
		__del_from_free_nid_list(i);
		nm_i->fcnt--;
	}
	f2fs_bug_on(nm_i->fcnt);
	spin_unlock(&nm_i->free_nid_list_lock);

	/* destroy nat cache */
	write_lock(&nm_i->nat_tree_lock);
	while ((found = __gang_lookup_nat_cache(nm_i,
			nid, NATVEC_SIZE, natvec))) {
		unsigned idx;
		for (idx = 0; idx < found; idx++) {
			struct nat_entry *e = natvec[idx];
			nid = nat_get_nid(e) + 1;
			__del_from_nat_cache(nm_i, e);
		}
	}
	f2fs_bug_on(nm_i->nat_cnt);
	write_unlock(&nm_i->nat_tree_lock);

	kfree(nm_i->nat_bitmap);
	sbi->nm_info = NULL;
	kfree(nm_i);
}

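/*
 * Create the slab caches backing struct nat_entry and struct free_nid;
 * if the second cache cannot be created, the first one is destroyed
 * before returning -ENOMEM.
 */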
int __init create_node_manager_caches(void)
{
	nat_entry_slab = f2fs_kmem_cache_create("nat_entry",
			sizeof(struct nat_entry), NULL);
	if (!nat_entry_slab)
		return -ENOMEM;

	free_nid_slab = f2fs_kmem_cache_create("free_nid",
			sizeof(struct free_nid), NULL);
	if (!free_nid_slab) {
		kmem_cache_destroy(nat_entry_slab);
		return -ENOMEM;
	}
	return 0;
}

void destroy_node_manager_caches(void)
{
	kmem_cache_destroy(free_nid_slab);
	kmem_cache_destroy(nat_entry_slab);
}