/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

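/*
 * Roll-forward is possible only if the blocks valid at the last checkpoint
 * plus the blocks allocated since then still fit in the user block count.
 */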
bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

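/*
 * Look up the fsync_inode_entry for @ino in the recovery list, or return
 * NULL if the inode has not been seen yet.
 */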
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
						nid_t ino)
{
	struct list_head *this;
	struct fsync_inode_entry *entry;

	list_for_each(this, head) {
		entry = list_entry(this, struct fsync_inode_entry, list);
		if (entry->inode->i_ino == ino)
			return entry;
	}
	return NULL;
}

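/*
 * Re-link a recovered inode into its parent directory: find (or reuse) the
 * parent, drop any stale entry that now points at a different inode, and add
 * the saved name back with __f2fs_add_link().
 */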
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
	if (!dir) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
		add_dirty_dir_inode(dir);
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;
	if (de) {
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			if (PTR_ERR(einode) == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
	goto out;

out_unmap_put:
	kunmap(page);
	f2fs_put_page(page, 0);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

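/*
 * Restore the basic inode metadata (mode, size, timestamps) from the raw
 * on-disk inode, then recover the directory entry if this dnode carries one.
 */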
static int recover_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(node_page);

	if (!IS_INODE(node_page))
		return 0;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	if (is_dent_dnode(node_page))
		return recover_dentry(node_page, inode);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(node_page), raw_inode->i_name);
	return 0;
}

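/*
 * Step 1 of roll-forward recovery: walk the warm node chain written after the
 * last checkpoint and build a list of inodes that were fsynced, loading each
 * inode and recovering its metadata along the way.
 */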
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;
	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT)
			break;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	return err;
}

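/*
 * Drop every entry on the recovery list, releasing the inode reference taken
 * in find_fsync_dnodes().
 */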
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

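/*
 * @blkaddr is being reused by the data we are recovering. If an older node
 * still claims that block, find its owner through the segment summary and
 * punch the stale index out so the block is not referenced twice.
 */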
static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
			block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_BLKOFF_FROM_SEG0(sbi, blkaddr);
	struct f2fs_summary_block *sum_node;
	struct f2fs_summary sum;
	struct page *sum_page, *node_page;
	nid_t ino, nid;
	struct inode *inode;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			goto got_it;
		}
	}

	sum_page = get_sum_page(sbi, segno);
	sum_node = (struct f2fs_summary_block *)page_address(sum_page);
	sum = sum_node->entries[blkoff];
	f2fs_put_page(sum_page, 1);
got_it:
	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
			le16_to_cpu(sum.ofs_in_node);

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
	return 0;
}

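/*
 * Replay one fsynced dnode: adopt every block address the fsync wrote that
 * differs from the checkpointed copy (evicting any stale owner of that block
 * first), then write the node page back in place.
 */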
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;

	if (recover_inline_data(inode, page))
		goto out;

	if (recover_xattr_data(inode, page, blkaddr))
		goto out;

	start = start_bidx_of_node(ofs_of_node(page), fi);
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE(fi);
	else
		end = start + ADDRS_PER_BLOCK;

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(ni.ino != ino_of_node(page));
	f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

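/*
 * Step 2 of roll-forward recovery: walk the node chain again and replay the
 * data blocks of every inode collected in step 1, dropping each entry once
 * its last fsynced dnode has been processed.
 */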
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			break;

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}

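/*
 * Entry point for roll-forward recovery after an unclean shutdown: collect
 * the fsynced inodes, replay their data, and write a checkpoint if anything
 * was recovered.
 */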
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = true;
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	f2fs_bug_on(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	sbi->por_doing = false;
	if (!err && need_writecp)
		write_checkpoint(sbi, false);
	return err;
}