/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/stat.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/falloc.h>
#include <linux/types.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/random.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
#include "acl.h"
#include "gc.h"
#include "trace.h"
#include <trace/events/f2fs.h>
static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
						struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	int err;

	f2fs_balance_fs(sbi);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	/* block allocation */
	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, page->index);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);

	file_update_time(vma->vm_file);
	lock_page(page);
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out;
	}

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto mapped;

	/* page is wholly or partially inside EOF */
	if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) >
						i_size_read(inode)) {
		unsigned offset;

		offset = i_size_read(inode) & ~PAGE_CACHE_MASK;
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);
	}
	set_page_dirty(page);
	SetPageUptodate(page);

	trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
	/* fill the page */
	f2fs_wait_on_page_writeback(page, DATA);
	/* if gced page is attached, don't write to cold segment */
	clear_cold_data(page);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(err);
}
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	inode = igrab(inode);
	dentry = d_find_any_alias(inode);
	iput(inode);
	if (!dentry)
		return 0;

	if (update_dent_inode(inode, inode, &dentry->d_name)) {
		dput(dentry);
		return 0;
	}

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
static inline bool need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool need_cp = false;

	if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
		need_cp = true;
	else if (file_enc_name(inode) && need_dentry_mark(sbi, inode->i_ino))
		need_cp = true;
	else if (file_wrong_pino(inode))
		need_cp = true;
	else if (!space_for_roll_forward(sbi))
		need_cp = true;
	else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		need_cp = true;
	else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
		need_cp = true;
	else if (test_opt(sbi, FASTBOOT))
		need_cp = true;
	else if (sbi->active_logs == 2)
		need_cp = true;

	return need_cp;
}
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;

	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	fi->xattr_ver = 0;
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		fi->i_pino = pino;
		file_got_pino(inode);
		up_write(&fi->i_sem);

		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
	} else {
		up_write(&fi->i_sem);
	}
}
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	bool need_cp = false;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};

	if (unlikely(f2fs_readonly(inode->i_sb)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	/* if fdatasync is triggered, let's do in-place-update */
	if (get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(fi, FI_NEED_IPU);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	clear_inode_flag(fi, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!datasync) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
			!exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
				exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/* guarantee free sections for fsync */
	f2fs_balance_fs(sbi);

	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&fi->i_sem);
	need_cp = need_do_checkpoint(inode);
	up_read(&fi->i_sem);

	if (need_cp) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(fi, FI_APPEND_WRITE);
		clear_inode_flag(fi, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	sync_node_pages(sbi, ino, &wbc);

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi)))
		goto out;

	if (need_inode_block_update(sbi, ino)) {
		mark_inode_dirty_sync(inode);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	ret = wait_on_node_pages_writeback(sbi, ino);
	if (ret)
		goto out;

	/* once recovery info is written, don't need to track this */
	remove_dirty_inode(sbi, ino, APPEND_INO);
	clear_inode_flag(fi, FI_APPEND_WRITE);
flush_out:
	remove_dirty_inode(sbi, ino, UPDATE_INO);
	clear_inode_flag(fi, FI_UPDATE_WRITE);
	ret = f2fs_issue_flush(sbi);
out:
	trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
	f2fs_trace_ios(NULL, 1);
	return ret;
}
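
/*
 * A minimal userspace sketch of how this path is reached (illustration only,
 * not part of the original file): fsync(2) and fdatasync(2) both land in
 * f2fs_sync_file() via f2fs_file_operations.fsync; the datasync argument
 * selects the lighter-weight handling above.
 *
 *	int fd = open("/mnt/f2fs/foo", O_WRONLY | O_CREAT, 0644);
 *	write(fd, buf, len);
 *	fdatasync(fd);	// datasync == 1, may skip writing the inode
 *	fsync(fd);	// datasync == 0, may trigger a checkpoint via f2fs_sync_fs()
 */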
static pgoff_t __get_first_dirty_index(struct address_space *mapping,
						pgoff_t pgofs, int whence)
{
	struct pagevec pvec;
	int nr_pages;

	if (whence != SEEK_DATA)
		return 0;

	/* find first dirty page index */
	pagevec_init(&pvec, 0);
	nr_pages = pagevec_lookup_tag(&pvec, mapping, &pgofs,
					PAGECACHE_TAG_DIRTY, 1);
	pgofs = nr_pages ? pvec.pages[0]->index : LONG_MAX;
	pagevec_release(&pvec);
	return pgofs;
}

static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
							int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
			(blkaddr != NEW_ADDR && blkaddr != NULL_ADDR))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset, dirty;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode)) {
		if (whence == SEEK_HOLE)
			data_ofs = isize;
		goto found;
	}

	pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT);

	dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exist */
			if (whence == SEEK_DATA) {
				pgofs = PGOFS_OF_NEXT_DNODE(pgofs,
							F2FS_I(inode));
				continue;
			} else {
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) {
			block_t blkaddr;

			blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);

			if (__found_offset(blkaddr, dirty, pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	mutex_unlock(&inode->i_mutex);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	mutex_unlock(&inode->i_mutex);
	return -ENXIO;
}
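
/*
 * Usage sketch (illustration only, not in the original source): SEEK_DATA
 * and SEEK_HOLE requests from userspace are served by f2fs_seek_block()
 * above, reached through f2fs_llseek() below.
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 *	// lseek() fails with ENXIO when the offset is at or beyond i_size.
 */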
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);

	if (f2fs_encrypted_inode(inode)) {
		int err = f2fs_get_encryption_info(inode);
		if (err)
			return 0;
	}

	/* we don't need to use inline_data strictly */
	if (f2fs_has_inline_data(inode)) {
		int err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	return 0;
}

static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int ret = generic_file_open(inode, filp);

	if (!ret && f2fs_encrypted_inode(inode)) {
		ret = f2fs_get_encryption_info(inode);
		if (ret)
			ret = -EACCES;
	}
	return ret;
}
int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + ofs;

	for (; count > 0; count--, addr++, dn->ofs_in_node++) {
		block_t blkaddr = le32_to_cpu(*addr);
		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		set_data_blkaddr(dn);
		invalidate_blocks(sbi, blkaddr);
		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(F2FS_I(dn->inode),
						FI_FIRST_BLOCK_WRITTEN);
		nr_free++;
	}

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = start_bidx_of_node(ofs_of_node(dn->node_page),
						F2FS_I(dn->inode)) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
		set_page_dirty(dn->node_page);
	}
	dn->ofs_in_node = ofs;

	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					 dn->ofs_in_node, nr_free);
	return nr_free;
}

void truncate_data_blocks(struct dnode_of_data *dn)
{
	truncate_data_blocks_range(dn, ADDRS_PER_BLOCK);
}
static int truncate_partial_data_page(struct inode *inode, u64 from,
							bool cache_only)
{
	unsigned offset = from & (PAGE_CACHE_SIZE - 1);
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = grab_cache_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = get_lock_data_page(inode, index);
	if (IS_ERR(page))
		return 0;
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, offset, PAGE_CACHE_SIZE - offset);
	if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode))
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
int truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	unsigned int blocksize = inode->i_sb->s_blocksize;
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1);

	if (lock)
		f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		if (truncate_inline_inode(ipage, from))
			set_page_dirty(ipage);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);

	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
int f2fs_truncate(struct inode *inode, bool lock)
{
	int err;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* we should check inline_data size */
	if (f2fs_has_inline_data(inode) && !f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = truncate_blocks(inode, i_size_read(inode), lock);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;
}
int f2fs_getattr(struct vfsmount *mnt,
			 struct dentry *dentry, struct kstat *stat)
{
	struct inode *inode = d_inode(dentry);

	generic_fillattr(inode, stat);
	return 0;
}
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = timespec_trunc(attr->ia_atime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = timespec_trunc(attr->ia_mtime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = timespec_trunc(attr->ia_ctime,
						inode->i_sb->s_time_gran);
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(fi, mode);
	}
}
#else
#define __setattr_copy setattr_copy
#endif
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	int err;

	err = inode_change_ok(inode, attr);
	if (err)
		return err;

	if (attr->ia_valid & ATTR_SIZE) {
		if (f2fs_encrypted_inode(inode) &&
				f2fs_get_encryption_info(inode))
			return -EACCES;

		if (attr->ia_size <= i_size_read(inode)) {
			truncate_setsize(inode, attr->ia_size);
			err = f2fs_truncate(inode, true);
			if (err)
				return err;
			f2fs_balance_fs(F2FS_I_SB(inode));
		} else {
			/*
			 * do not trim all blocks after i_size if target size is
			 * larger than i_size.
			 */
			truncate_setsize(inode, attr->ia_size);
		}
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, get_inode_mode(inode));
		if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
			inode->i_mode = fi->i_acl_mode;
			clear_inode_flag(fi, FI_ACL_MODE);
		}
	}

	mark_inode_dirty(inode);
	return err;
}
const struct inode_operations f2fs_file_inode_operations = {
	.getattr	= f2fs_getattr,
	.setattr	= f2fs_setattr,
	.get_acl	= f2fs_get_acl,
	.set_acl	= f2fs_set_acl,
#ifdef CONFIG_F2FS_FS_XATTR
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= f2fs_listxattr,
	.removexattr	= generic_removexattr,
#endif
	.fiemap		= f2fs_fiemap,
};
static int fill_zero(struct inode *inode, pgoff_t index,
					loff_t start, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;

	if (!len)
		return 0;

	f2fs_balance_fs(sbi);

	f2fs_lock_op(sbi);
	page = get_new_data_page(inode, NULL, index, false);
	f2fs_unlock_op(sbi);

	if (IS_ERR(page))
		return PTR_ERR(page);

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user(page, start, len);
	set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}

int truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
{
	pgoff_t index;
	int err;

	for (index = pg_start; index < pg_end; index++) {
		struct dnode_of_data dn;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (err) {
			if (err == -ENOENT)
				continue;
			return err;
		}

		if (dn.data_blkaddr != NULL_ADDR)
			truncate_data_blocks_range(&dn, 1);
		f2fs_put_dnode(&dn);
	}
	return 0;
}
static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t off_start, off_end;
	int ret = 0;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;
		}
		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				return ret;
		}

		if (pg_start < pg_end) {
			struct address_space *mapping = inode->i_mapping;
			loff_t blk_start, blk_end;
			struct f2fs_sb_info *sbi = F2FS_I_SB(inode);

			f2fs_balance_fs(sbi);

			blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT;
			blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT;
			truncate_inode_pages_range(mapping, blk_start,
							blk_end - 1);

			f2fs_lock_op(sbi);
			ret = truncate_hole(inode, pg_start, pg_end);
			f2fs_unlock_op(sbi);
		}
	}

	return ret;
}
static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
	int ret = 0;

	for (; end < nrpages; start++, end++) {
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, end, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			new_addr = NULL_ADDR;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		if (new_addr == NULL_ADDR) {
			set_new_dnode(&dn, inode, NULL, NULL, 0);
			ret = get_dnode_of_data(&dn, start, LOOKUP_NODE_RA);
			if (ret && ret != -ENOENT) {
				goto out;
			} else if (ret == -ENOENT) {
				goto next;
			}

			if (dn.data_blkaddr == NULL_ADDR) {
				f2fs_put_dnode(&dn);
				goto next;
			}

			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		} else {
			struct page *ipage;

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, start);
			if (ret)
				goto out;

			old_addr = dn.data_blkaddr;
			if (old_addr != NEW_ADDR && new_addr == NEW_ADDR) {
				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
				invalidate_blocks(sbi, old_addr);

				dn.data_blkaddr = new_addr;
				set_data_blkaddr(&dn);
			} else if (new_addr != NEW_ADDR) {
				struct node_info ni;

				get_node_info(sbi, dn.nid, &ni);
				f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
			}

			f2fs_put_dnode(&dn);
		}
next:
		f2fs_unlock_op(sbi);
	}
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
	pgoff_t pg_start, pg_end;
	loff_t new_size;
	int ret;

	if (offset + len >= i_size_read(inode))
		return -EINVAL;

	/* collapse range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	ret = f2fs_do_collapse(inode, pg_start, pg_end);
	if (ret)
		return ret;

	new_size = i_size_read(inode) - len;

	ret = truncate_blocks(inode, new_size, true);
	if (!ret)
		i_size_write(inode, new_size);

	return ret;
}
static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
								int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
	if (ret)
		return ret;

	truncate_pagecache_range(inode, offset, offset + len - 1);

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	if (pg_start == pg_end) {
		ret = fill_zero(inode, pg_start, off_start,
						off_end - off_start);
		if (ret)
			return ret;

		new_size = max_t(loff_t, new_size, offset + len);
	} else {
		if (off_start) {
			ret = fill_zero(inode, pg_start++, off_start,
						PAGE_CACHE_SIZE - off_start);
			if (ret)
				return ret;

			new_size = max_t(loff_t, new_size,
					(loff_t)pg_start << PAGE_CACHE_SHIFT);
		}

		for (index = pg_start; index < pg_end; index++) {
			struct dnode_of_data dn;
			struct page *ipage;

			f2fs_lock_op(sbi);

			ipage = get_node_page(sbi, inode->i_ino);
			if (IS_ERR(ipage)) {
				ret = PTR_ERR(ipage);
				f2fs_unlock_op(sbi);
				goto out;
			}

			set_new_dnode(&dn, inode, ipage, NULL, 0);
			ret = f2fs_reserve_block(&dn, index);
			if (ret) {
				f2fs_unlock_op(sbi);
				goto out;
			}

			if (dn.data_blkaddr != NEW_ADDR) {
				invalidate_blocks(sbi, dn.data_blkaddr);

				dn.data_blkaddr = NEW_ADDR;
				set_data_blkaddr(&dn);

				dn.data_blkaddr = NULL_ADDR;
				f2fs_update_extent_cache(&dn);
			}
			f2fs_put_dnode(&dn);
			f2fs_unlock_op(sbi);

			new_size = max_t(loff_t, new_size,
				(loff_t)(index + 1) << PAGE_CACHE_SHIFT);
		}

		if (off_end) {
			ret = fill_zero(inode, pg_end, 0, off_end);
			if (ret)
				goto out;

			new_size = max_t(loff_t, new_size, offset + len);
		}
	}

out:
	if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	return ret;
}
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t pg_start, pg_end, delta, nrpages, idx;
	loff_t new_size;
	int ret;

	new_size = i_size_read(inode) + len;
	if (new_size > inode->i_sb->s_maxbytes)
		return -EFBIG;

	if (offset >= i_size_read(inode))
		return -EINVAL;

	/* insert range should be aligned to block size of f2fs. */
	if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	f2fs_balance_fs(sbi);

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	ret = truncate_blocks(inode, i_size_read(inode), true);
	if (ret)
		return ret;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
	if (ret)
		return ret;

	truncate_pagecache(inode, offset);

	pg_start = offset >> PAGE_CACHE_SHIFT;
	pg_end = (offset + len) >> PAGE_CACHE_SHIFT;
	delta = pg_end - pg_start;
	nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;

	for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
		struct dnode_of_data dn;
		struct page *ipage;
		block_t new_addr, old_addr;

		f2fs_lock_op(sbi);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = get_dnode_of_data(&dn, idx, LOOKUP_NODE_RA);
		if (ret && ret != -ENOENT) {
			goto out;
		} else if (ret == -ENOENT) {
			goto next;
		} else if (dn.data_blkaddr == NULL_ADDR) {
			f2fs_put_dnode(&dn);
			goto next;
		} else {
			new_addr = dn.data_blkaddr;
			truncate_data_blocks_range(&dn, 1);
			f2fs_put_dnode(&dn);
		}

		ipage = get_node_page(sbi, inode->i_ino);
		if (IS_ERR(ipage)) {
			ret = PTR_ERR(ipage);
			goto out;
		}

		set_new_dnode(&dn, inode, ipage, NULL, 0);
		ret = f2fs_reserve_block(&dn, idx + delta);
		if (ret)
			goto out;

		old_addr = dn.data_blkaddr;
		f2fs_bug_on(sbi, old_addr != NEW_ADDR);

		if (new_addr != NEW_ADDR) {
			struct node_info ni;

			get_node_info(sbi, dn.nid, &ni);
			f2fs_replace_block(sbi, &dn, old_addr, new_addr,
							ni.version, true);
		}
		f2fs_put_dnode(&dn);
next:
		f2fs_unlock_op(sbi);
	}

	i_size_write(inode, new_size);
	return 0;
out:
	f2fs_unlock_op(sbi);
	return ret;
}
static int expand_inode_data(struct inode *inode, loff_t offset,
					loff_t len, int mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t index, pg_start, pg_end;
	loff_t new_size = i_size_read(inode);
	loff_t off_start, off_end;
	int ret = 0;

	f2fs_balance_fs(sbi);

	ret = inode_newsize_ok(inode, (len + offset));
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_convert_inline_inode(inode);
		if (ret)
			return ret;
	}

	pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT;
	pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT;

	off_start = offset & (PAGE_CACHE_SIZE - 1);
	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);

	f2fs_lock_op(sbi);

	for (index = pg_start; index <= pg_end; index++) {
		struct dnode_of_data dn;

		if (index == pg_end && !off_end)
			goto noalloc;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_reserve_block(&dn, index);
		if (ret)
			break;
noalloc:
		if (pg_start == pg_end)
			new_size = offset + len;
		else if (index == pg_start && off_start)
			new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT;
		else if (index == pg_end)
			new_size = ((loff_t)index << PAGE_CACHE_SHIFT) +
								off_end;
		else
			new_size += PAGE_CACHE_SIZE;
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		i_size_read(inode) < new_size) {
		i_size_write(inode, new_size);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}
	f2fs_unlock_op(sbi);

	return ret;
}
static long f2fs_fallocate(struct file *file, int mode,
				loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	long ret = 0;

	/* f2fs only supports ->fallocate for regular files */
	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_encrypted_inode(inode) &&
		(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
		return -EOPNOTSUPP;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
			FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
			FALLOC_FL_INSERT_RANGE))
		return -EOPNOTSUPP;

	mutex_lock(&inode->i_mutex);

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset >= inode->i_size)
			goto out;

		ret = punch_hole(inode, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		ret = f2fs_collapse_range(inode, offset, len);
	} else if (mode & FALLOC_FL_ZERO_RANGE) {
		ret = f2fs_zero_range(inode, offset, len, mode);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		ret = f2fs_insert_range(inode, offset, len);
	} else {
		ret = expand_inode_data(inode, offset, len, mode);
	}

	if (!ret) {
		inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		mark_inode_dirty(inode);
	}

out:
	mutex_unlock(&inode->i_mutex);

	trace_f2fs_fallocate(inode, mode, offset, len, ret);
	return ret;
}
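
/*
 * Usage sketch (illustration only, not in the original source): the mode
 * bits accepted above map directly to fallocate(2) flags from
 * <linux/falloc.h>; offsets and lengths for collapse/insert must be
 * F2FS_BLKSIZE-aligned, as checked by the helpers above.
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 4096, 8192);
 *	fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 4096);
 *	fallocate(fd, FALLOC_FL_ZERO_RANGE, 0, 1 << 20);
 *	fallocate(fd, 0, 0, 1 << 20);	// preallocation via expand_inode_data()
 */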
static int f2fs_release_file(struct inode *inode, struct file *filp)
{
	/* some remaining atomic pages should be discarded */
	if (f2fs_is_atomic_file(inode))
		commit_inmem_pages(inode, true);
	if (f2fs_is_volatile_file(inode)) {
		set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
		filemap_fdatawrite(inode->i_mapping);
		clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
	}
	return 0;
}

#define F2FS_REG_FLMASK		(~(FS_DIRSYNC_FL | FS_TOPDIR_FL))
#define F2FS_OTHER_FLMASK	(FS_NODUMP_FL | FS_NOATIME_FL)

static inline __u32 f2fs_mask_flags(umode_t mode, __u32 flags)
{
	if (S_ISDIR(mode))
		return flags;
	else if (S_ISREG(mode))
		return flags & F2FS_REG_FLMASK;
	else
		return flags & F2FS_OTHER_FLMASK;
}
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;

	return put_user(flags, (int __user *)arg);
}

static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int flags = fi->i_flags & FS_FL_USER_VISIBLE;
	unsigned int oldflags;
	int ret;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!inode_owner_or_capable(inode)) {
		ret = -EACCES;
		goto out;
	}

	if (get_user(flags, (int __user *)arg)) {
		ret = -EFAULT;
		goto out;
	}

	flags = f2fs_mask_flags(inode->i_mode, flags);

	mutex_lock(&inode->i_mutex);

	oldflags = fi->i_flags;

	if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
		if (!capable(CAP_LINUX_IMMUTABLE)) {
			mutex_unlock(&inode->i_mutex);
			ret = -EPERM;
			goto out;
		}
	}

	flags = flags & FS_FL_USER_MODIFIABLE;
	flags |= oldflags & ~FS_FL_USER_MODIFIABLE;
	fi->i_flags = flags;
	mutex_unlock(&inode->i_mutex);

	f2fs_set_inode_flags(inode);
	inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
out:
	mnt_drop_write_file(filp);
	return ret;
}
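
/*
 * Usage sketch (illustration only, not in the original source): these two
 * handlers implement the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS-style attribute
 * interface under the F2FS ioctl numbers, so chattr/lsattr-like tools can
 * toggle flags such as FS_NOATIME_FL on an open fd.
 *
 *	int flags;
 *	ioctl(fd, F2FS_IOC_GETFLAGS, &flags);
 *	flags |= FS_NOATIME_FL;
 *	ioctl(fd, F2FS_IOC_SETFLAGS, &flags);
 */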
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_is_atomic_file(inode))
		return 0;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
	return 0;
}

static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
		ret = commit_inmem_pages(inode, false);
		if (ret)
			goto err_out;
	}

	ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0);
err_out:
	mnt_drop_write_file(filp);
	return ret;
}

static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (f2fs_is_volatile_file(inode))
		return 0;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		return ret;

	set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
	return 0;
}

static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!f2fs_is_volatile_file(inode))
		return 0;

	if (!f2fs_is_first_block_written(inode))
		return truncate_partial_data_page(inode, 0, true);

	return punch_hole(inode, 0, F2FS_BLKSIZE);
}

static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode));

	if (f2fs_is_atomic_file(inode)) {
		clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
		commit_inmem_pages(inode, true);
	}

	if (f2fs_is_volatile_file(inode))
		clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);

	mnt_drop_write_file(filp);
	return ret;
}
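
/*
 * Usage sketch (illustration only, assumes the F2FS_IOC_* numbers are
 * visible to userspace via the f2fs headers): an atomic write is bracketed
 * by the start/commit ioctls; data written in between is held in the
 * in-memory page pool until commit, which then flushes through
 * f2fs_sync_file().
 *
 *	ioctl(fd, F2FS_IOC_START_ATOMIC_WRITE);
 *	pwrite(fd, buf, len, off);
 *	ioctl(fd, F2FS_IOC_COMMIT_ATOMIC_WRITE);
 */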
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		sb = freeze_bdev(sb->s_bdev);
		if (sb && !IS_ERR(sb)) {
			f2fs_stop_checkpoint(sbi);
			thaw_bdev(sb->s_bdev, sb);
		}
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		f2fs_sync_fs(sb, 1);
		f2fs_stop_checkpoint(sbi);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
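
/*
 * Usage sketch (illustration only, not in the original source): the
 * shutdown ioctl takes one of the F2FS_GOING_DOWN_* values by pointer.
 *
 *	__u32 how = F2FS_GOING_DOWN_METASYNC;
 *	ioctl(fd, F2FS_IOC_SHUTDOWN, &how);
 */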
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	return 0;
}

static bool uuid_is_nonzero(__u8 u[16])
{
	int i;

	for (i = 0; i < 16; i++)
		if (u[i])
			return true;
	return false;
}
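
/*
 * Usage sketch (illustration only, not in the original source): FITRIM is
 * the generic VFS discard ioctl; the same struct fstrim_range is used by
 * other filesystems, and the range is clamped by the handler above.
 *
 *	struct fstrim_range r = { .start = 0, .len = ULLONG_MAX, .minlen = 0 };
 *	ioctl(fd, FITRIM, &r);	// on return, r.len holds the trimmed byte count
 */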
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	struct f2fs_encryption_policy policy;
	struct inode *inode = file_inode(filp);

	if (copy_from_user(&policy, (struct f2fs_encryption_policy __user *)arg,
				sizeof(policy)))
		return -EFAULT;

	return f2fs_process_policy(&policy, inode);
#else
	return -EOPNOTSUPP;
#endif
}

static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
	struct f2fs_encryption_policy policy;
	struct inode *inode = file_inode(filp);
	int err;

	err = f2fs_get_policy(inode, &policy);
	if (err)
		return err;

	if (copy_to_user((struct f2fs_encryption_policy __user *)arg, &policy,
							sizeof(policy)))
		return -EFAULT;
	return 0;
#else
	return -EOPNOTSUPP;
#endif
}

static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_crypto(inode->i_sb))
		return -EOPNOTSUPP;

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);

	mnt_drop_write_file(filp);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		return err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		return -EFAULT;
	return 0;
}
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 i, count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(count, (__u32 __user *)arg))
		return -EFAULT;

	if (!count || count > F2FS_BATCH_GC_MAX_NUM)
		return -EINVAL;

	for (i = 0; i < count; i++) {
		if (!mutex_trylock(&sbi->gc_mutex))
			break;

		if (f2fs_gc(sbi))
			break;
	}

	if (put_user(i, (__u32 __user *)arg))
		return -EFAULT;

	return 0;
}
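
/*
 * Usage sketch (illustration only, not in the original source):
 * F2FS_IOC_GARBAGE_COLLECT takes the requested number of GC passes and
 * writes back how many iterations actually ran before the loop stopped.
 *
 *	__u32 count = 8;
 *	ioctl(fd, F2FS_IOC_GARBAGE_COLLECT, &count);
 *	// count now holds the number of completed GC iterations
 */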
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case F2FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case F2FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case F2FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case F2FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	default:
		return -ENOTTY;
	}
}
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (f2fs_encrypted_inode(inode) &&
				!f2fs_has_encryption_key(inode) &&
				f2fs_get_encryption_info(inode))
		return -EACCES;

	return generic_file_write_iter(iocb, from);
}

#ifdef CONFIG_COMPAT
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case F2FS_IOC32_GETFLAGS:
		cmd = F2FS_IOC_GETFLAGS;
		break;
	case F2FS_IOC32_SETFLAGS:
		cmd = F2FS_IOC_SETFLAGS;
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};