/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uuid.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
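/*
 * This file provides the file, inode and vm operations for regular f2fs
 * files: page-fault handling, fsync, llseek, truncation, the fallocate
 * family and the f2fs private ioctls are all implemented below.
 */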
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	f2fs_balance_fs(sbi, dn.node_changed);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA, false);

	/* wait for GCed encrypted page writeback */
	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		f2fs_wait_on_encrypted_page_writeback(sbi, dn.data_blkaddr);

	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	f2fs_update_time(sbi, REQ_TIME);
	return block_page_mkwrite_return(err);
}
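/*
 * Reads through an mmap()ed f2fs file take the generic fault paths; only
 * page_mkwrite() is overridden, so that a block is reserved before a
 * read-only page becomes writable.
 */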
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
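/*
 * An fsync can complete by writing dirty node pages alone as long as the
 * change is recoverable by roll-forward at mount time; the cases below
 * are the ones where that is impossible and a full checkpoint is needed.
 */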
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* we also need to catch pending inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync && !f2fs_skip_inode_update(inode)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time writing recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both fdatasync() and fsync() are able to be recovered after a
	 * sudden power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	ret = fsync_node_pages(sbi, inode, &wbc, atomic);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, don't need to track this */
	remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	remove_ino_entry(sbi, ino, UPDATE_INO);
	clear_inode_flag(inode, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
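/*
 * ->fsync entry point: both fsync(2) and fdatasync(2) arrive here through
 * the VFS. The "atomic" argument of f2fs_do_sync_file() is only set by
 * the atomic/volatile write ioctls later in this file.
 */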
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
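/*
 * A dirty page that has not been written back yet has no on-disk block
 * address, so SEEK_DATA must consider the first dirty index in the page
 * cache as data, not just allocated block addresses.
 */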
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : ULONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}
static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
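/*
 * Sketch of how the SEEK_DATA/SEEK_HOLE support below is driven from
 * userspace (illustrative only; fd is any file on an f2fs mount):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);		// first data offset
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	// end of that extent
 *
 * Both calls reach f2fs_llseek() and, for these two whence values, the
 * block-level scan in f2fs_seek_block().
 */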
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (f2fs_encrypted_inode(inode)) {
		err = fscrypt_get_encryption_info(inode);
		if (err)
			return 0;
		if (!f2fs_encrypted_inode(inode))
			return -ENOKEY;
	}

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);
	struct dentry *dir;

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = fscrypt_get_encryption_info(inode);
		if (ret)
			return -EACCES;
		if (!fscrypt_has_encryption_key(inode))
			return -ENOKEY;
	}
	dir = dget_parent(file_dentry(filp));
	if (f2fs_encrypted_inode(d_inode(dir)) &&
			!fscrypt_has_permitted_context(d_inode(dir), inode)) {
		dput(dir);
		return -EPERM;
	}
	dput(dir);
	return ret;
}
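/*
 * Free "count" block addresses starting at dn->ofs_in_node of the current
 * node page, drop the matching extent cache range, and return how many
 * block addresses were actually released.
 */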
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}
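/* free every block address covered by one direct node block */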
void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	unsigned offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = f2fs_grab_cache_page(mapping, index, false);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, offset, PAGE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) ||
					!S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (free_from >= sbi->max_file_blocks)
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty_sync(inode);
	return 0;
}
int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);

	generic_fillattr(inode, stat);
	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				fscrypt_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode), true);
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);

			/* should convert inline inode here */
			if (!f2fs_may_inline_data(inode)) {
				err = f2fs_convert_inline_inode(inode);
				if (err)
					return err;
			}
			inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
			inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	mark_inode_dirty_sync(inode);
	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};
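/*
 * Zero "len" bytes of the page at "index", allocating it if necessary;
 * used by punch_hole() and f2fs_zero_range() for the unaligned head and
 * tail of a range.
 */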
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi, true);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA, true);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	int err;

	while (pg_start < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT) {
				pg_start++;
				continue;
			}
			return err;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);

		f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);

		truncate_data_blocks_range(&dn, count);
		f2fs_put_dnode(&dn);

		pg_start += count;
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi, true);

			blk_start = (loff_t)pg_start << PAGE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
					blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}
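/*
 * Move the block at "src" to "dst", either by re-pointing the on-disk
 * block address (when the block is not protected by the last checkpoint)
 * or by copying the cached page; this is the primitive that the collapse
 * and insert range operations are built on.
 */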
static int __exchange_data_block(struct inode *inode, pgoff_t src,
					pgoff_t dst, bool full)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	block_t new_addr;
	bool do_replace = false;
	int ret;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
	if (ret && ret != -ENOENT) {
		return ret;
	} else if (ret == -ENOENT) {
		new_addr = NULL_ADDR;
	} else {
		new_addr = dn.data_blkaddr;
		if (!is_checkpointed_data(sbi, new_addr)) {
			/* do not invalidate this block address */
			f2fs_update_data_blkaddr(&dn, NULL_ADDR);
			do_replace = true;
		}
		f2fs_put_dnode(&dn);
	}

	if (new_addr == NULL_ADDR)
		return full ? truncate_hole(inode, dst, dst + 1) : 0;

	if (do_replace) {
		struct page *ipage;
		struct node_info ni;

		if (test_opt(sbi, LFS)) {
			ret = -ENOTSUPP;
			goto err_out;
		}

		ipage = get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto err_out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, dst);
		if (ret)
			goto err_out;

		truncate_data_blocks_range(&dn, 1);

		get_node_info(sbi, dn.nid, &ni);
		f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
				ni.version, true, false);
		f2fs_put_dnode(&dn);
	} else {
		struct page *psrc, *pdst;

		psrc = get_lock_data_page(inode, src, true);
		if (IS_ERR(psrc))
			return PTR_ERR(psrc);
		pdst = get_new_data_page(inode, NULL, dst, true);
		if (IS_ERR(pdst)) {
			f2fs_put_page(psrc, 1);
			return PTR_ERR(pdst);
		}
		f2fs_copy_page(psrc, pdst);
		set_page_dirty(pdst);
		f2fs_put_page(pdst, 1);
		f2fs_put_page(psrc, 1);

		return truncate_hole(inode, src, src + 1);
	}
	return 0;

err_out:
	if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
		f2fs_update_data_blkaddr(&dn, new_addr);
		f2fs_put_dnode(&dn);
	}
	return ret;
}
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		f2fs_balance_fs(sbi, true);
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, end, start, true);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}

	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	new_size = i_size_read(inode) - len;
	truncate_pagecache(inode, new_size);

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		f2fs_i_size_write(inode, new_size);

	return ret;
}
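/*
 * For each block in [start, end) of the current dnode: reserve a new
 * address where none exists and invalidate any block that already holds
 * data, so the whole range reads back as zeroes afterwards.
 */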
static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
								pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	pgoff_t index = start;
	unsigned int ofs_in_node = dn->ofs_in_node;
	blkcnt_t count = 0;
	int ret;

	for (; index < end; index++, dn->ofs_in_node++) {
		if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR)
			count++;
	}

	dn->ofs_in_node = ofs_in_node;
	ret = reserve_new_blocks(dn, count);
	if (ret)
		return ret;

	dn->ofs_in_node = ofs_in_node;
	for (index = start; index < end; index++, dn->ofs_in_node++) {
		dn->data_blkaddr =
			datablock_addr(dn->node_page, dn->ofs_in_node);
		/*
		 * reserve_new_blocks will not guarantee entire block
		 * allocation.
		 */
		if (dn->data_blkaddr == NULL_ADDR) {
			ret = -ENOSPC;
			break;
		}
		if (dn->data_blkaddr != NEW_ADDR) {
			invalidate_blocks(sbi, dn->data_blkaddr);
			dn->data_blkaddr = NEW_ADDR;
			set_data_blkaddr(dn);
		}
	}

	f2fs_update_extent_cache_range(dn, start, 0, index - start);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;

	off_start = offset & (PAGE_SIZE - 1);
	off_end = (offset + len) & (PAGE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_SHIFT);
		}

		for (index = pg_start; index < pg_end;) {
			struct dnode_of_data dn;
			unsigned int end_offset;
			pgoff_t end;

			f2fs_lock_op(sbi);

			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, index, ALLOC_NODE);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
			end = min(pg_end, end_offset - dn.ofs_in_node + index);

			ret = f2fs_do_zero_range(&dn, index, end);
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);
			if (ret)
				goto out;

			index = end;
			new_size = max_t(loff_t, new_size,
					(loff_t)index << PAGE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);

	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret = 0;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_SHIFT;
	pg_end = (offset + len) >> PAGE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		f2fs_lock_op(sbi);
		ret = __exchange_data_block(inode, idx, idx + delta, false);
		f2fs_unlock_op(sbi);
		if (ret)
			break;
	}

	/* write out all moved pages, if possible */
	filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	truncate_pagecache(inode, offset);

	if (!ret)
		f2fs_i_size_write(inode, new_size);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	pgoff_t pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_end;
	int ret;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	f2fs_balance_fs(sbi, true);

	pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
	off_end = (offset + len) & (PAGE_SIZE - 1);

	map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
	map.m_len = pg_end - map.m_lblk;
	if (off_end)
		map.m_len++;

	ret = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
	if (ret) {
		pgoff_t last_off;

		if (!map.m_len)
			return ret;

		last_off = map.m_lblk + map.m_len - 1;

		/* update new size to the failed position */
		new_size = (last_off == pg_end) ? offset + len :
					(loff_t)(last_off + 1) << PAGE_SHIFT;
	} else {
		new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
		f2fs_i_size_write(inode, new_size);

	return ret;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	inode_lock(inode);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty_sync(inode);
		f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	}

out:
	inode_unlock(inode);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
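/*
 * Sketch of exercising the fallocate modes handled above from userspace
 * (illustrative only; fd is a regular file on an f2fs mount):
 *
 *	fallocate(fd, 0, 0, 1 << 20);			  // expand_inode_data()
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *		      FALLOC_FL_KEEP_SIZE, 0, 4096);	  // punch_hole()
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 4096); // f2fs_collapse_range()
 */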
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * f2fs_release_file is called at every close call. So we should
	 * not drop any inmemory pages by a close issued by another process.
	 */
	if (!(filp->f_mode & FMODE_WRITE) ||
			atomic_read(&inode->i_writecount) != 1)
		return 0;

	/* any remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		set_inode_flag(inode, FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(inode, FI_DROP_CACHE);
	}
	return 0;
}
#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	unsigned int oldflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(flags, (int __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	flags = f2fs_mask_flags(inode->i_mode, flags);

	inode_lock(inode);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			inode_unlock(inode);
			ret = -EPERM;
			goto out;
		}
	}

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	inode_unlock(inode);

	inode->i_ctime = CURRENT_TIME;
	f2fs_set_inode_flags(inode);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_ATOMIC_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	if (!get_dirty_pages(inode))
		goto out;

	f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING,
		"Unexpected flush for atomic writes: ino=%lu, npages=%lld",
					inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		clear_inode_flag(inode, FI_ATOMIC_FILE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
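/*
 * The atomic write ioctls are used in pairs (illustrative sketch of the
 * intended usage from userspace):
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	write(fd, buf, len);	// pages are held in memory
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 *
 * Data written between the two ioctls only reaches disk when the commit
 * below succeeds.
 */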
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto err_out;

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(inode, FI_ATOMIC_FILE);
		ret = commit_inmem_pages(inode);
		if (ret) {
			set_inode_flag(inode, FI_ATOMIC_FILE);
			goto err_out;
		}
	}

	ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
err_out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi, false);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		sync_meta_pages(sbi, META, LONG_MAX);
		f2fs_stop_checkpoint(sbi, false);
		break;
	default:
		ret = -EINVAL;
		goto out;
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	mnt_drop_write_file(filp);
	return ret;
}
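/*
 * FITRIM: the standard VFS discard ioctl; the requested minimum length is
 * clamped to the device's discard granularity before the range is handed
 * to f2fs_trim_fs().
 */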
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct fscrypt_policy policy;
	struct inode *inode = file_inode(filp);
	int ret;

	if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg,
							sizeof(policy)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	ret = fscrypt_process_policy(inode, &policy);

	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	struct fscrypt_policy policy;
	struct inode *inode = file_inode(filp);
	int err;

	err = fscrypt_get_policy(inode, &policy);
	if (err)
		return err;

	if (copy_to_user((struct fscrypt_policy __user *)arg, &policy, sizeof(policy)))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		mnt_drop_write_file(filp);
		return err;
	}
	mnt_drop_write_file(filp);
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		if (!mutex_trylock(&sbi->gc_mutex)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		mutex_lock(&sbi->gc_mutex);
	}

	ret = f2fs_gc(sbi, sync);
out:
	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	ret = f2fs_sync_fs(sbi->sb, 1);

	mnt_drop_write_file(filp);
	return ret;
}
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_pgofs = NULL };
	struct extent_info ei;
	pgoff_t pg_start, pg_end;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	unsigned int pages_per_sec = sbi->segs_per_sec * blk_per_seg;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (need_inplace_update(inode))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		if (blk_end && blk_end != map.m_pblk) {
			fragmented = true;
			break;
		}
		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented)
		goto out;

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;

	sec_num = (map.m_len + pages_per_sec - 1) / pages_per_sec;

	/*
	 * make sure there are enough free sections for LFS allocation; this
	 * avoids running defragmentation in SSR mode when free sections are
	 * being consumed intensively
	 */
	if (has_not_enough_free_secs(sbi, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_READ);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk++;
			continue;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;

		if (idx < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	if (f2fs_readonly(sbi->sb)) {
		err = -EROFS;
		goto out;
	}

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range))) {
		err = -EFAULT;
		goto out;
	}

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) ||
		range.len & (F2FS_BLKSIZE - 1)) {
		err = -EINVAL;
		goto out;
	}

	err = f2fs_defragment_range(sbi, filp, &range);
	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		goto out;

	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		err = -EFAULT;
out:
	mnt_drop_write_file(filp);
	return err;
}
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	default:
		return -ENOTTY;
	}
}
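/*
 * ->write_iter with one f2fs twist: blocks for the whole write are
 * preallocated up front under inode_lock(), so the page-by-page write
 * path that follows should not need to allocate or run out of space
 * halfway through.
 */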
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (f2fs_encrypted_inode(inode) &&
				!fscrypt_has_encryption_key(inode) &&
				fscrypt_get_encryption_info(inode))
		return -EACCES;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		ret = f2fs_preallocate_blocks(iocb, from);
		if (!ret)
			ret = __generic_file_write_iter(iocb, from);
	}
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	case F2FS_IOC32_GETVERSION:
		cmd = F2FS_IOC_GETVERSION;
		break;
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};