/*
 * fs/f2fs/inode.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

#include <trace/events/f2fs.h>

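/*
 * Mark the VFS inode dirty. If f2fs already tracks this inode as dirty
 * (f2fs_inode_dirtied() returns true), skip the redundant VFS call.
 */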
void f2fs_mark_inode_dirty_sync(struct inode *inode, bool sync)
{
	if (f2fs_inode_dirtied(inode, sync))
		return;

	mark_inode_dirty_sync(inode);
}

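/*
 * Propagate the on-disk f2fs inode flags (FS_*_FL) into the in-core
 * VFS inode flags (S_*).
 */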
void f2fs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = F2FS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl,
			S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
}

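/*
 * Device inodes keep their device number in the first data-address slots
 * of the raw inode: slot 0 holds the old (16-bit) encoding and slot 1 the
 * new (32-bit) encoding. A zero in slot 0 means the new encoding is used.
 */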
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		if (ri->i_addr[extra_size])
			inode->i_rdev = old_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size]));
		else
			inode->i_rdev = new_decode_dev(
				le32_to_cpu(ri->i_addr[extra_size + 1]));
	}
}

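/*
 * Report whether the inode's first data block has been written to disk,
 * i.e. holds a real block address rather than NEW_ADDR or NULL_ADDR.
 */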
static bool __written_first_block(struct f2fs_inode *ri)
{
	block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);

	if (addr != NEW_ADDR && addr != NULL_ADDR)
		return true;
	return false;
}

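/*
 * Counterpart of __get_inode_rdev(): store the device number back into
 * the raw inode, preferring the old encoding when it fits.
 */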
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
{
	int extra_size = get_extra_isize(inode);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		if (old_valid_dev(inode->i_rdev)) {
			ri->i_addr[extra_size] =
				cpu_to_le32(old_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 1] = 0;
		} else {
			ri->i_addr[extra_size] = 0;
			ri->i_addr[extra_size + 1] =
				cpu_to_le32(new_encode_dev(inode->i_rdev));
			ri->i_addr[extra_size + 2] = 0;
		}
	}
}

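/*
 * If the inode claims inline data but none was flagged as existing, scan
 * the inline area; any non-zero word means data does exist, so restore
 * FI_DATA_EXIST and re-dirty the node page.
 */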
static void __recover_inline_status(struct inode *inode, struct page *ipage)
{
	void *inline_data = inline_data_addr(inode, ipage);
	__le32 *start = inline_data;
	__le32 *end = start + MAX_INLINE_DATA(inode) / sizeof(__le32);

	while (start < end) {
		if (*start++) {
			f2fs_wait_on_page_writeback(ipage, NODE, true);

			set_inode_flag(inode, FI_DATA_EXIST);
			set_raw_inline(inode, F2FS_INODE(ipage));
			set_page_dirty(ipage);
			return;
		}
	}
	return;
}

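/*
 * Inode checksumming applies only when the superblock feature is enabled
 * and the on-disk inode has an extra-attribute area large enough to hold
 * the i_inode_checksum field.
 */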
static bool f2fs_enable_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;
	int extra_isize = le32_to_cpu(ri->i_extra_isize);

	if (!f2fs_sb_has_inode_chksum(sbi->sb))
		return false;

	if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
		return false;

	if (!F2FS_FITS_IN_INODE(ri, extra_isize, i_inode_checksum))
		return false;

	return true;
}

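/*
 * Compute the inode checksum: seed from the inode number and generation,
 * then checksum the whole on-disk inode block with the i_inode_checksum
 * field itself replaced by a zero placeholder (dummy_cs).
 */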
static __u32 f2fs_inode_chksum(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_node *node = F2FS_NODE(page);
	struct f2fs_inode *ri = &node->i;
	__le32 ino = node->footer.ino;
	__le32 gen = ri->i_generation;
	__u32 chksum, chksum_seed;
	__u32 dummy_cs = 0;
	unsigned int offset = offsetof(struct f2fs_inode, i_inode_checksum);
	unsigned int cs_size = sizeof(dummy_cs);

	chksum = f2fs_chksum(sbi, sbi->s_chksum_seed, (__u8 *)&ino,
							sizeof(ino));
	chksum_seed = f2fs_chksum(sbi, chksum, (__u8 *)&gen, sizeof(gen));

	chksum = f2fs_chksum(sbi, chksum_seed, (__u8 *)ri, offset);
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)&dummy_cs, cs_size);
	offset += cs_size;
	chksum = f2fs_chksum(sbi, chksum, (__u8 *)ri + offset,
						F2FS_BLKSIZE - offset);
	return chksum;
}

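/*
 * Verify the stored checksum against a freshly computed one. Dirty or
 * writeback pages are skipped, since their checksum may not be up to
 * date until they are written out.
 */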
bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri;
	__u32 provided, calculated;

	if (!f2fs_enable_inode_chksum(sbi, page) ||
			PageDirty(page) || PageWriteback(page))
		return true;

	ri = &F2FS_NODE(page)->i;
	provided = le32_to_cpu(ri->i_inode_checksum);
	calculated = f2fs_inode_chksum(sbi, page);

	if (provided != calculated)
		f2fs_msg(sbi->sb, KERN_WARNING,
			"checksum invalid, ino = %x, %x vs. %x",
			ino_of_node(page), provided, calculated);

	return provided == calculated;
}

void f2fs_inode_chksum_set(struct f2fs_sb_info *sbi, struct page *page)
{
	struct f2fs_inode *ri = &F2FS_NODE(page)->i;

	if (!f2fs_enable_inode_chksum(sbi, page))
		return;

	ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}

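/*
 * Read the on-disk inode into the in-core inode: fetch the node page,
 * copy over mode/ownership/size/times, restore f2fs-private fields, and
 * recover inline-data and rdev state.
 */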
static int do_read_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct page *node_page;
	struct f2fs_inode *ri;
	projid_t i_projid;

	/* Check if ino is within scope */
	if (check_nid_range(sbi, inode->i_ino)) {
		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
				(unsigned long) inode->i_ino);
		WARN_ON(1);
		return -EINVAL;
	}

	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	ri = F2FS_INODE(node_page);

	inode->i_mode = le16_to_cpu(ri->i_mode);
	i_uid_write(inode, le32_to_cpu(ri->i_uid));
	i_gid_write(inode, le32_to_cpu(ri->i_gid));
	set_nlink(inode, le32_to_cpu(ri->i_links));
	inode->i_size = le64_to_cpu(ri->i_size);
	inode->i_blocks = SECTOR_FROM_BLOCK(le64_to_cpu(ri->i_blocks) - 1);

	inode->i_atime.tv_sec = le64_to_cpu(ri->i_atime);
	inode->i_ctime.tv_sec = le64_to_cpu(ri->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(ri->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(ri->i_atime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(ri->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(ri->i_mtime_nsec);
	inode->i_generation = le32_to_cpu(ri->i_generation);

	fi->i_current_depth = le32_to_cpu(ri->i_current_depth);
	fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
	fi->i_flags = le32_to_cpu(ri->i_flags);
	fi->i_advise = ri->i_advise;
	fi->i_pino = le32_to_cpu(ri->i_pino);
	fi->i_dir_level = ri->i_dir_level;

	if (f2fs_init_extent_tree(inode, &ri->i_ext))
		set_page_dirty(node_page);

	get_inline_info(inode, ri);

	fi->i_extra_isize = f2fs_has_extra_attr(inode) ?
					le16_to_cpu(ri->i_extra_isize) : 0;

	/* check data exist */
	if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
		__recover_inline_status(inode, node_page);

	/* get rdev by using inline_info */
	__get_inode_rdev(inode, ri);

	if (__written_first_block(ri))
		set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);

	if (!need_inode_block_update(sbi, inode->i_ino))
		fi->last_disk_size = inode->i_size;

	if (fi->i_flags & FS_PROJINHERIT_FL)
		set_inode_flag(inode, FI_PROJ_INHERIT);

	if (f2fs_has_extra_attr(inode) && f2fs_sb_has_project_quota(sbi->sb) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_projid))
		i_projid = (projid_t)le32_to_cpu(ri->i_projid);
	else
		i_projid = F2FS_DEF_PROJID;
	fi->i_projid = make_kprojid(&init_user_ns, i_projid);

	f2fs_put_page(node_page, 1);

	stat_inc_inline_xattr(inode);
	stat_inc_inline_inode(inode);
	stat_inc_inline_dir(inode);

	return 0;
}

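/*
 * Look up (or create) the in-core inode for @ino. Freshly allocated
 * inodes are filled from disk via do_read_inode() and then wired to the
 * address-space and inode operations matching their type.
 */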
struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
{
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct inode *inode;
	int ret = 0;

	inode = iget_locked(sb, ino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		trace_f2fs_iget(inode);
		return inode;
	}
	if (ino == F2FS_NODE_INO(sbi) || ino == F2FS_META_INO(sbi))
		goto make_now;

	ret = do_read_inode(inode);
	if (ret)
		goto bad_inode;
make_now:
	if (ino == F2FS_NODE_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_node_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (ino == F2FS_META_INO(sbi)) {
		inode->i_mapping->a_ops = &f2fs_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
	} else if (S_ISREG(inode->i_mode)) {
		inode->i_op = &f2fs_file_inode_operations;
		inode->i_fop = &f2fs_file_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &f2fs_dir_inode_operations;
		inode->i_fop = &f2fs_dir_operations;
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_HIGH_ZERO);
	} else if (S_ISLNK(inode->i_mode)) {
		if (f2fs_encrypted_inode(inode))
			inode->i_op = &f2fs_encrypted_symlink_inode_operations;
		else
			inode->i_op = &f2fs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &f2fs_dblock_aops;
	} else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
			S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
		inode->i_op = &f2fs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
	} else {
		ret = -EIO;
		goto bad_inode;
	}
	f2fs_set_inode_flags(inode);
	unlock_new_inode(inode);
	trace_f2fs_iget(inode);
	return inode;

bad_inode:
	iget_failed(inode);
	trace_f2fs_iget_exit(inode, ret);
	return ERR_PTR(ret);
}

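/*
 * Same as f2fs_iget(), but retry after a short congestion wait when the
 * failure was due to memory pressure (-ENOMEM).
 */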
struct inode *f2fs_iget_retry(struct super_block *sb, unsigned long ino)
{
	struct inode *inode;
retry:
	inode = f2fs_iget(sb, ino);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOMEM) {
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}
	}
	return inode;
}

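/*
 * Copy the in-core inode back into its on-disk node page and mark the
 * page dirty; the set_page_dirty() result tells the caller whether the
 * page actually transitioned to dirty.
 */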
int update_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *ri;
	struct extent_tree *et = F2FS_I(inode)->extent_tree;

	f2fs_inode_synced(inode);

	f2fs_wait_on_page_writeback(node_page, NODE, true);

	ri = F2FS_INODE(node_page);

	ri->i_mode = cpu_to_le16(inode->i_mode);
	ri->i_advise = F2FS_I(inode)->i_advise;
	ri->i_uid = cpu_to_le32(i_uid_read(inode));
	ri->i_gid = cpu_to_le32(i_gid_read(inode));
	ri->i_links = cpu_to_le32(inode->i_nlink);
	ri->i_size = cpu_to_le64(i_size_read(inode));
	ri->i_blocks = cpu_to_le64(SECTOR_TO_BLOCK(inode->i_blocks) + 1);

	if (et) {
		read_lock(&et->lock);
		set_raw_extent(&et->largest, &ri->i_ext);
		read_unlock(&et->lock);
	} else {
		memset(&ri->i_ext, 0, sizeof(ri->i_ext));
	}
	set_raw_inline(inode, ri);

	ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	ri->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	ri->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ri->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ri->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	ri->i_current_depth = cpu_to_le32(F2FS_I(inode)->i_current_depth);
	ri->i_xattr_nid = cpu_to_le32(F2FS_I(inode)->i_xattr_nid);
	ri->i_flags = cpu_to_le32(F2FS_I(inode)->i_flags);
	ri->i_pino = cpu_to_le32(F2FS_I(inode)->i_pino);
	ri->i_generation = cpu_to_le32(inode->i_generation);
	ri->i_dir_level = F2FS_I(inode)->i_dir_level;

	if (f2fs_has_extra_attr(inode)) {
		ri->i_extra_isize = cpu_to_le16(F2FS_I(inode)->i_extra_isize);

		if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)->sb) &&
			F2FS_FITS_IN_INODE(ri, F2FS_I(inode)->i_extra_isize,
								i_projid)) {
			projid_t i_projid;

			i_projid = from_kprojid(&init_user_ns,
						F2FS_I(inode)->i_projid);
			ri->i_projid = cpu_to_le32(i_projid);
		}
	}

	__set_inode_rdev(inode, ri);
	set_cold_node(inode, node_page);

	/* deleted inode */
	if (inode->i_nlink == 0)
		clear_inline_node(node_page);

	return set_page_dirty(node_page);
}

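/*
 * Fetch the inode's node page and sync the in-core inode into it,
 * retrying on -ENOMEM and stopping checkpointing on unexpected errors.
 */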
int update_inode_page(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *node_page;
	int ret = 0;
retry:
	node_page = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(node_page)) {
		int err = PTR_ERR(node_page);
		if (err == -ENOMEM) {
			cond_resched();
			goto retry;
		} else if (err != -ENOENT) {
			f2fs_stop_checkpoint(sbi, false);
		}
		return 0;
	}
	ret = update_inode(inode, node_page);
	f2fs_put_page(node_page, 1);
	return ret;
}

int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		return 0;

	if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
		return 0;

	/*
	 * We need to balance fs here to prevent from producing dirty node
	 * pages during the urgent cleaning time when running out of free
	 * sections.
	 */
	update_inode_page(inode);
	if (wbc && wbc->nr_to_write)
		f2fs_balance_fs(sbi, true);
	return 0;
}

/*
 * Called at the last iput() if i_nlink is zero
 */
void f2fs_evict_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t xnid = F2FS_I(inode)->i_xattr_nid;
	int err = 0;

	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);

	trace_f2fs_evict_inode(inode);
	truncate_inode_pages_final(&inode->i_data);

	if (inode->i_ino == F2FS_NODE_INO(sbi) ||
			inode->i_ino == F2FS_META_INO(sbi))
		goto out_clear;

	f2fs_bug_on(sbi, get_dirty_pages(inode));
	remove_dirty_inode(inode);

	f2fs_destroy_extent_tree(inode);

	if (inode->i_nlink || is_bad_inode(inode))
		goto no_delete;

	dquot_initialize(inode);

	remove_ino_entry(sbi, inode->i_ino, APPEND_INO);
	remove_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	remove_ino_entry(sbi, inode->i_ino, FLUSH_INO);

	sb_start_intwrite(inode->i_sb);
	set_inode_flag(inode, FI_NO_ALLOC);
	i_size_write(inode, 0);
retry:
	if (F2FS_HAS_BLOCKS(inode))
		err = f2fs_truncate(inode);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
		f2fs_show_injection_info(FAULT_EVICT_INODE);
		err = -EIO;
	}
#endif
	if (!err) {
		f2fs_lock_op(sbi);
		err = remove_inode_page(inode);
		f2fs_unlock_op(sbi);
		if (err == -ENOENT)
			err = 0;
	}

	/* give more chances, if ENOMEM case */
	if (err == -ENOMEM) {
		err = 0;
		goto retry;
	}

	if (err)
		update_inode_page(inode);
	dquot_free_inode(inode);
	sb_end_intwrite(inode->i_sb);
no_delete:
	dquot_drop(inode);

	stat_dec_inline_xattr(inode);
	stat_dec_inline_dir(inode);
	stat_dec_inline_inode(inode);

	if (likely(!is_set_ckpt_flags(sbi, CP_ERROR_FLAG)))
		f2fs_bug_on(sbi, is_inode_flag_set(inode, FI_DIRTY_INODE));
	else
		f2fs_inode_synced(inode);

	/* ino == 0, if f2fs_new_inode() failed */
	if (inode->i_ino)
		invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino,
							inode->i_ino);
	if (xnid)
		invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
	if (inode->i_nlink) {
		if (is_inode_flag_set(inode, FI_APPEND_WRITE))
			add_ino_entry(sbi, inode->i_ino, APPEND_INO);
		if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
			add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
	}
	if (is_inode_flag_set(inode, FI_FREE_NID)) {
		alloc_nid_failed(sbi, inode->i_ino);
		clear_inode_flag(inode, FI_FREE_NID);
	} else {
		f2fs_bug_on(sbi, err &&
			!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
	}
out_clear:
	fscrypt_put_encryption_info(inode, NULL);
	clear_inode(inode);
}

/* caller should call f2fs_lock_op() */
void handle_failed_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct node_info ni;

	/*
	 * clear nlink of the inode in order to release its resources
	 * immediately.
	 */
	clear_nlink(inode);

	/*
	 * We must call this to avoid the inode remaining dirty, which would
	 * cause a panic when flushing dirty inodes in gdirty_list.
	 */
	update_inode_page(inode);
	f2fs_inode_synced(inode);

	/* don't make bad inode, since it becomes a regular file. */
	unlock_new_inode(inode);

	/*
	 * Note: we should add the inode to the orphan list before
	 * f2fs_unlock_op() so we can prevent losing this orphan when
	 * encountering a checkpoint followed by sudden power-off.
	 */
	get_node_info(sbi, inode->i_ino, &ni);

	if (ni.blk_addr != NULL_ADDR) {
		int err = acquire_orphan_inode(sbi);
		if (err) {
			set_sbi_flag(sbi, SBI_NEED_FSCK);
			f2fs_msg(sbi->sb, KERN_WARNING,
				"Too many orphan inodes, run fsck to fix.");
		} else {
			add_orphan_inode(inode);
		}
		alloc_nid_done(sbi, inode->i_ino);
	} else {
		set_inode_flag(inode, FI_FREE_NID);
	}

	f2fs_unlock_op(sbi);

	/* iput will drop the inode object */
	iput(inode);
}