]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - fs/f2fs/file.c
f2fs: deprecate f2fs_trace_io
[mirror_ubuntu-jammy-kernel.git] / fs / f2fs / file.c
CommitLineData
7c1a000d 1// SPDX-License-Identifier: GPL-2.0
0a8165d7 2/*
fbfa2cc5
JK
3 * fs/f2fs/file.c
4 *
5 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
6 * http://www.samsung.com/
fbfa2cc5
JK
7 */
8#include <linux/fs.h>
9#include <linux/f2fs_fs.h>
10#include <linux/stat.h>
11#include <linux/buffer_head.h>
12#include <linux/writeback.h>
ae51fb31 13#include <linux/blkdev.h>
fbfa2cc5
JK
14#include <linux/falloc.h>
15#include <linux/types.h>
e9750824 16#include <linux/compat.h>
fbfa2cc5
JK
17#include <linux/uaccess.h>
18#include <linux/mount.h>
7f7670fe 19#include <linux/pagevec.h>
dc91de78 20#include <linux/uio.h>
8da4b8c4 21#include <linux/uuid.h>
4dd6f977 22#include <linux/file.h>
4507847c 23#include <linux/nls.h>
9af84648 24#include <linux/sched/signal.h>
fbfa2cc5
JK
25
26#include "f2fs.h"
27#include "node.h"
28#include "segment.h"
29#include "xattr.h"
30#include "acl.h"
c1c1b583 31#include "gc.h"
a2a4a7e4 32#include <trace/events/f2fs.h>
fa4320ce 33#include <uapi/linux/f2fs.h>
fbfa2cc5 34
/*
 * Read-fault handler for f2fs-mapped files.
 *
 * Takes i_mmap_sem for read around the generic filemap_fault() so the fault
 * is serialized against operations that hold it for write elsewhere in f2fs.
 * On success (ret == 0) the mapped-read I/O statistic is bumped by one block.
 */
static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vmf->vma->vm_file);
	vm_fault_t ret;

	down_read(&F2FS_I(inode)->i_mmap_sem);
	ret = filemap_fault(vmf);
	up_read(&F2FS_I(inode)->i_mmap_sem);

	/* account one block of mapped read I/O only when the fault succeeded */
	if (!ret)
		f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO,
							F2FS_BLKSIZE);

	trace_f2fs_filemap_fault(inode, vmf->pgoff, (unsigned long)ret);

	return ret;
}
52
/*
 * Write-fault (page_mkwrite) handler: make the faulted page writable.
 *
 * Fails fast on immutable inodes, checkpoint errors and when no checkpoint
 * is ready.  For compressed files a fully written compressed cluster skips
 * block allocation (need_alloc = false); a partially written cluster returns
 * -EAGAIN.  Otherwise a block is allocated for the page, the tail beyond EOF
 * is zeroed, and the page is dirtied.  The error code is translated through
 * block_page_mkwrite_return().
 */
static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	bool need_alloc = true;
	int err = 0;

	if (unlikely(IS_IMMUTABLE(inode)))
		return VM_FAULT_SIGBUS;

	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto err;
	}

	if (!f2fs_is_checkpoint_ready(sbi)) {
		err = -ENOSPC;
		goto err;
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);

		if (ret < 0) {
			err = ret;
			goto err;
		} else if (ret) {
			/* partial cluster: caller must retry the fault */
			if (ret < F2FS_I(inode)->i_cluster_size) {
				err = -EAGAIN;
				goto err;
			}
			/* whole cluster already written: no new allocation */
			need_alloc = false;
		}
	}
#endif
	/* should do out of any locked page */
	if (need_alloc)
		f2fs_balance_fs(sbi, true);

	sb_start_pagefault(inode->i_sb);

	f2fs_bug_on(sbi, f2fs_has_inline_data(inode));

	file_update_time(vmf->vma->vm_file);
	down_read(&F2FS_I(inode)->i_mmap_sem);
	lock_page(page);
	/* page may have been truncated/invalidated while unlocked */
	if (unlikely(page->mapping != inode->i_mapping ||
			page_offset(page) > i_size_read(inode) ||
			!PageUptodate(page))) {
		unlock_page(page);
		err = -EFAULT;
		goto out_sem;
	}

	if (need_alloc) {
		/* block allocation */
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true);
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_block(&dn, page->index);
		f2fs_put_dnode(&dn);
		f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false);
	}

#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (!need_alloc) {
		/* only look up the existing dnode to learn data_blkaddr */
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
		f2fs_put_dnode(&dn);
	}
#endif
	if (err) {
		unlock_page(page);
		goto out_sem;
	}

	f2fs_wait_on_page_writeback(page, DATA, false, true);

	/* wait for GCed page writeback via META_MAPPING */
	f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);

	/*
	 * check to see if the page is mapped already (no holes)
	 */
	if (PageMappedToDisk(page))
		goto out_sem;

	/* page is wholly or partially inside EOF: zero the part beyond EOF */
	if (((loff_t)(page->index + 1) << PAGE_SHIFT) >
						i_size_read(inode)) {
		loff_t offset;

		offset = i_size_read(inode) & ~PAGE_MASK;
		zero_user_segment(page, offset, PAGE_SIZE);
	}
	set_page_dirty(page);
	if (!PageUptodate(page))
		SetPageUptodate(page);

	f2fs_update_iostat(sbi, APP_MAPPED_IO, F2FS_BLKSIZE);
	f2fs_update_time(sbi, REQ_TIME);

	trace_f2fs_vm_page_mkwrite(page, DATA);
out_sem:
	up_read(&F2FS_I(inode)->i_mmap_sem);

	sb_end_pagefault(inode->i_sb);
err:
	return block_page_mkwrite_return(err);
}
165
/* vm_operations for f2fs file mappings, installed by f2fs_file_mmap(). */
static const struct vm_operations_struct f2fs_file_vm_ops = {
	.fault		= f2fs_filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= f2fs_vm_page_mkwrite,
};
171
/*
 * Look up the parent inode number of @inode via a live dentry alias.
 * Returns 1 and stores the parent ino in *pino on success, 0 when the
 * inode has no (non-deleted) alias.
 */
static int get_parent_ino(struct inode *inode, nid_t *pino)
{
	struct dentry *dentry;

	/*
	 * Make sure to get the non-deleted alias. The alias associated with
	 * the open file descriptor being fsync()'ed may be deleted already.
	 */
	dentry = d_find_alias(inode);
	if (!dentry)
		return 0;

	*pino = parent_ino(dentry);
	dput(dentry);
	return 1;
}
188
/*
 * Decide whether an fsync of @inode must trigger a full checkpoint instead
 * of relying on roll-forward recovery.  Returns the first matching reason
 * (CP_NO_NEEDED when roll-forward recovery is sufficient).
 */
static inline enum cp_reason_type need_do_checkpoint(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	enum cp_reason_type cp_reason = CP_NO_NEEDED;

	if (!S_ISREG(inode->i_mode))
		cp_reason = CP_NON_REGULAR;
	else if (f2fs_compressed_file(inode))
		cp_reason = CP_COMPRESSED;
	else if (inode->i_nlink != 1)
		cp_reason = CP_HARDLINK;
	else if (is_sbi_flag_set(sbi, SBI_NEED_CP))
		cp_reason = CP_SB_NEED_CP;
	else if (file_wrong_pino(inode))
		cp_reason = CP_WRONG_PINO;
	else if (!f2fs_space_for_roll_forward(sbi))
		cp_reason = CP_NO_SPC_ROLL;
	else if (!f2fs_is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
		cp_reason = CP_NODE_NEED_CP;
	else if (test_opt(sbi, FASTBOOT))
		cp_reason = CP_FASTBOOT_MODE;
	else if (F2FS_OPTION(sbi).active_logs == 2)
		cp_reason = CP_SPEC_LOG_NUM;
	else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT &&
		f2fs_need_dentry_mark(sbi, inode->i_ino) &&
		f2fs_exist_written_data(sbi, F2FS_I(inode)->i_pino,
							TRANS_DIR_INO))
		cp_reason = CP_RECOVER_DIR;

	return cp_reason;
}
220
/*
 * Return true if the inode's node page is dirty or an inode block update
 * is still pending — i.e. the on-disk inode may be stale.
 */
static bool need_inode_page_update(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct page *i = find_get_page(NODE_MAPPING(sbi), ino);
	bool ret = false;
	/* But we need to avoid that there are some inode updates */
	if ((i && PageDirty(i)) || f2fs_need_inode_block_update(sbi, ino))
		ret = true;
	f2fs_put_page(i, 0);
	return ret;
}
231
/*
 * Repair a stale parent-ino hint: if the recorded pino is wrong and the
 * inode has exactly one link, rewrite it from a live dentry alias.
 * Runs under fi->i_sem held for write.
 */
static void try_to_fix_pino(struct inode *inode)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	nid_t pino;

	down_write(&fi->i_sem);
	if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
			get_parent_ino(inode, &pino)) {
		f2fs_i_pino_write(inode, pino);
		file_got_pino(inode);
	}
	up_write(&fi->i_sem);
}
245
/*
 * Core of f2fs fsync/fdatasync.
 *
 * Flushes the data range, then either issues a full checkpoint (when
 * need_do_checkpoint() reports a reason) or writes the fsync node chain for
 * roll-forward recovery, waiting for node writeback unless @atomic.  Finally
 * issues a flush unless atomic or FSYNC_MODE_NOBARRIER.  Returns 0 on
 * success or a negative errno.
 */
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
						int datasync, bool atomic)
{
	struct inode *inode = file->f_mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	nid_t ino = inode->i_ino;
	int ret = 0;
	enum cp_reason_type cp_reason = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	unsigned int seq_id = 0;

	/* nothing to do on a read-only fs or while checkpointing is disabled */
	if (unlikely(f2fs_readonly(inode->i_sb) ||
				is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return 0;

	trace_f2fs_sync_file_enter(inode);

	if (S_ISDIR(inode->i_mode))
		goto go_write;

	/* if fdatasync is triggered, let's do in-place-update */
	if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
		set_inode_flag(inode, FI_NEED_IPU);
	ret = file_write_and_wait_range(file, start, end);
	clear_inode_flag(inode, FI_NEED_IPU);

	if (ret) {
		trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
		return ret;
	}

	/* if the inode is dirty, let's recover all the time */
	if (!f2fs_skip_inode_update(inode, datasync)) {
		f2fs_write_inode(inode, NULL);
		goto go_write;
	}

	/*
	 * if there is no written data, don't waste time to write recovery info.
	 */
	if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
			!f2fs_exist_written_data(sbi, ino, APPEND_INO)) {

		/* it may call write_inode just prior to fsync */
		if (need_inode_page_update(sbi, ino))
			goto go_write;

		if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
				f2fs_exist_written_data(sbi, ino, UPDATE_INO))
			goto flush_out;
		goto out;
	}
go_write:
	/*
	 * Both of fdatasync() and fsync() are able to be recovered from
	 * sudden-power-off.
	 */
	down_read(&F2FS_I(inode)->i_sem);
	cp_reason = need_do_checkpoint(inode);
	up_read(&F2FS_I(inode)->i_sem);

	if (cp_reason) {
		/* all the dirty node pages should be flushed for POR */
		ret = f2fs_sync_fs(inode->i_sb, 1);

		/*
		 * We've secured consistency through sync_fs. Following pino
		 * will be used only for fsynced inodes after checkpoint.
		 */
		try_to_fix_pino(inode);
		clear_inode_flag(inode, FI_APPEND_WRITE);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		goto out;
	}
sync_nodes:
	atomic_inc(&sbi->wb_sync_req[NODE]);
	ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
	atomic_dec(&sbi->wb_sync_req[NODE]);
	if (ret)
		goto out;

	/* if cp_error was enabled, we should avoid infinite loop */
	if (unlikely(f2fs_cp_error(sbi))) {
		ret = -EIO;
		goto out;
	}

	/* inode block became dirty again: rewrite it and redo node sync */
	if (f2fs_need_inode_block_update(sbi, ino)) {
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_write_inode(inode, NULL);
		goto sync_nodes;
	}

	/*
	 * If it's atomic_write, it's just fine to keep write ordering. So
	 * here we don't need to wait for node write completion, since we use
	 * node chain which serializes node blocks. If one of node writes are
	 * reordered, we can see simply broken chain, resulting in stopping
	 * roll-forward recovery. It means we'll recover all or none node blocks
	 * given fsync mark.
	 */
	if (!atomic) {
		ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
		if (ret)
			goto out;
	}

	/* once recovery info is written, don't need to tack this */
	f2fs_remove_ino_entry(sbi, ino, APPEND_INO);
	clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
	if (!atomic && F2FS_OPTION(sbi).fsync_mode != FSYNC_MODE_NOBARRIER)
		ret = f2fs_issue_flush(sbi, inode->i_ino);
	if (!ret) {
		f2fs_remove_ino_entry(sbi, ino, UPDATE_INO);
		clear_inode_flag(inode, FI_UPDATE_WRITE);
		f2fs_remove_ino_entry(sbi, ino, FLUSH_INO);
	}
	f2fs_update_time(sbi, REQ_TIME);
out:
	trace_f2fs_sync_file_exit(inode, cp_reason, datasync, ret);
	return ret;
}
373
/*
 * ->fsync entry point: bail out with -EIO on a checkpoint error, otherwise
 * delegate to f2fs_do_sync_file() in non-atomic mode.
 */
int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	return f2fs_do_sync_file(file, start, end, datasync, false);
}
380
/*
 * Test whether @blkaddr at page @index satisfies a SEEK_DATA/SEEK_HOLE
 * lookup.  For SEEK_DATA, a valid block address counts, as does a
 * preallocated (NEW_ADDR) block whose page is dirty in the page cache.
 * For SEEK_HOLE, an unallocated (NULL_ADDR) block counts.
 */
static bool __found_offset(struct address_space *mapping, block_t blkaddr,
				pgoff_t index, int whence)
{
	switch (whence) {
	case SEEK_DATA:
		if (__is_valid_data_blkaddr(blkaddr))
			return true;
		if (blkaddr == NEW_ADDR &&
		    xa_get_mark(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY))
			return true;
		break;
	case SEEK_HOLE:
		if (blkaddr == NULL_ADDR)
			return true;
		break;
	}
	return false;
}
399
/*
 * Implement SEEK_DATA/SEEK_HOLE: walk the inode's dnode blocks from
 * @offset forward, looking for the first block address that matches
 * @whence (see __found_offset()).  Returns the new file position or
 * -ENXIO when nothing matches (or @offset is beyond i_size).
 */
static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;
	struct dnode_of_data dn;
	pgoff_t pgofs, end_offset;
	loff_t data_ofs = offset;
	loff_t isize;
	int err = 0;

	inode_lock(inode);

	isize = i_size_read(inode);
	if (offset >= isize)
		goto fail;

	/* handle inline data case */
	if (f2fs_has_inline_data(inode)) {
		if (whence == SEEK_HOLE) {
			data_ofs = isize;
			goto found;
		} else if (whence == SEEK_DATA) {
			data_ofs = offset;
			goto found;
		}
	}

	pgofs = (pgoff_t)(offset >> PAGE_SHIFT);

	for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = f2fs_get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
		if (err && err != -ENOENT) {
			goto fail;
		} else if (err == -ENOENT) {
			/* direct node does not exists */
			if (whence == SEEK_DATA) {
				/* skip the whole missing node's range */
				pgofs = f2fs_get_next_page_offset(&dn, pgofs);
				continue;
			} else {
				/* a missing node is a hole */
				goto found;
			}
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);

		/* find data/hole in dnode block */
		for (; dn.ofs_in_node < end_offset;
				dn.ofs_in_node++, pgofs++,
				data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
			block_t blkaddr;

			blkaddr = f2fs_data_blkaddr(&dn);

			/* corrupted block address: abort the seek */
			if (__is_valid_data_blkaddr(blkaddr) &&
				!f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
					blkaddr, DATA_GENERIC_ENHANCE)) {
				f2fs_put_dnode(&dn);
				goto fail;
			}

			if (__found_offset(file->f_mapping, blkaddr,
							pgofs, whence)) {
				f2fs_put_dnode(&dn);
				goto found;
			}
		}
		f2fs_put_dnode(&dn);
	}

	if (whence == SEEK_DATA)
		goto fail;
found:
	/* clamp a SEEK_HOLE result to i_size */
	if (whence == SEEK_HOLE && data_ofs > isize)
		data_ofs = isize;
	inode_unlock(inode);
	return vfs_setpos(file, data_ofs, maxbytes);
fail:
	inode_unlock(inode);
	return -ENXIO;
}
481
/*
 * ->llseek entry point: SEEK_SET/CUR/END go through the generic helper;
 * SEEK_DATA/SEEK_HOLE are handled by f2fs_seek_block().  Compressed files
 * use a larger maxbytes derived from max_file_blocks().
 */
static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t maxbytes = inode->i_sb->s_maxbytes;

	if (f2fs_compressed_file(inode))
		maxbytes = max_file_blocks(inode) << F2FS_BLKSIZE_BITS;

	switch (whence) {
	case SEEK_SET:
	case SEEK_CUR:
	case SEEK_END:
		return generic_file_llseek_size(file, offset, whence,
						maxbytes, i_size_read(inode));
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset < 0)
			return -ENXIO;
		return f2fs_seek_block(file, offset, whence);
	}

	return -EINVAL;
}
505
/*
 * ->mmap entry point: reject mappings on checkpoint error or when the
 * compression backend is unavailable, convert any inline data out of the
 * inode, then install f2fs_file_vm_ops and mark the inode as mmapped.
 */
static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	/* we don't need to use inline_data strictly */
	err = f2fs_convert_inline_inode(inode);
	if (err)
		return err;

	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
	return 0;
}
527
/*
 * ->open entry point: run the fscrypt and fsverity open hooks, require a
 * ready compression backend, enable FMODE_NOWAIT, and finish through the
 * quota open helper.
 */
static int f2fs_file_open(struct inode *inode, struct file *filp)
{
	int err = fscrypt_file_open(inode, filp);

	if (err)
		return err;

	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = fsverity_file_open(inode, filp);
	if (err)
		return err;

	filp->f_mode |= FMODE_NOWAIT;

	return dquot_file_open(inode, filp);
}
546
/*
 * Invalidate @count block addresses in @dn's node page starting at
 * dn->ofs_in_node: each valid block is invalidated and freed, compressed
 * clusters have their valid-block accounting updated, and the extent cache
 * and valid-block count are adjusted for the whole range.
 */
void f2fs_truncate_data_blocks_range(struct dnode_of_data *dn, int count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_node *raw_node;
	int nr_free = 0, ofs = dn->ofs_in_node, len = count;
	__le32 *addr;
	int base = 0;
	bool compressed_cluster = false;
	int cluster_index = 0, valid_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	bool released = !atomic_read(&F2FS_I(dn->inode)->i_compr_blocks);

	if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
		base = get_extra_isize(dn->inode);

	raw_node = F2FS_NODE(dn->node_page);
	addr = blkaddr_in_node(raw_node) + base + ofs;

	/* Assumption: truncation starts with cluster */
	for (; count > 0; count--, addr++, dn->ofs_in_node++, cluster_index++) {
		block_t blkaddr = le32_to_cpu(*addr);

		/* at each cluster boundary, flush the previous cluster's count */
		if (f2fs_compressed_file(dn->inode) &&
					!(cluster_index & (cluster_size - 1))) {
			if (compressed_cluster)
				f2fs_i_compr_blocks_update(dn->inode,
							valid_blocks, false);
			compressed_cluster = (blkaddr == COMPRESS_ADDR);
			valid_blocks = 0;
		}

		if (blkaddr == NULL_ADDR)
			continue;

		dn->data_blkaddr = NULL_ADDR;
		f2fs_set_data_blkaddr(dn);

		if (__is_valid_data_blkaddr(blkaddr)) {
			/* skip corrupted addresses instead of freeing them */
			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE))
				continue;
			if (compressed_cluster)
				valid_blocks++;
		}

		if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
			clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);

		f2fs_invalidate_blocks(sbi, blkaddr);

		if (!released || blkaddr != COMPRESS_ADDR)
			nr_free++;
	}

	if (compressed_cluster)
		f2fs_i_compr_blocks_update(dn->inode, valid_blocks, false);

	if (nr_free) {
		pgoff_t fofs;
		/*
		 * once we invalidate valid blkaddr in range [ofs, ofs + count],
		 * we will invalidate all blkaddr in the whole range.
		 */
		fofs = f2fs_start_bidx_of_node(ofs_of_node(dn->node_page),
							dn->inode) + ofs;
		f2fs_update_extent_cache_range(dn, fofs, 0, len);
		dec_valid_block_count(sbi, dn->inode, nr_free);
	}
	dn->ofs_in_node = ofs;

	f2fs_update_time(sbi, REQ_TIME);
	trace_f2fs_truncate_data_blocks_range(dn->inode, dn->nid,
					dn->ofs_in_node, nr_free);
}
621
/* Truncate every data block address held by @dn's node block. */
void f2fs_truncate_data_blocks(struct dnode_of_data *dn)
{
	f2fs_truncate_data_blocks_range(dn, ADDRS_PER_BLOCK(dn->inode));
}
626
/*
 * Zero the tail of the page containing the new EOF (@from).  With
 * @cache_only, only an already-cached, uptodate page is touched (and not
 * re-dirtied); otherwise the page is read in via f2fs_get_lock_data_page()
 * and marked dirty.  Returns 0 on success or a negative errno.
 */
static int truncate_partial_data_page(struct inode *inode, u64 from,
								bool cache_only)
{
	loff_t offset = from & (PAGE_SIZE - 1);
	pgoff_t index = from >> PAGE_SHIFT;
	struct address_space *mapping = inode->i_mapping;
	struct page *page;

	/* page-aligned truncation with nothing cached: nothing to zero */
	if (!offset && !cache_only)
		return 0;

	if (cache_only) {
		page = find_lock_page(mapping, index);
		if (page && PageUptodate(page))
			goto truncate_out;
		f2fs_put_page(page, 1);
		return 0;
	}

	page = f2fs_get_lock_data_page(inode, index, true);
	if (IS_ERR(page))
		return PTR_ERR(page) == -ENOENT ? 0 : PTR_ERR(page);
truncate_out:
	f2fs_wait_on_page_writeback(page, DATA, true, true);
	zero_user(page, offset, PAGE_SIZE - offset);

	/* An encrypted inode should have a key and truncate the last page. */
	f2fs_bug_on(F2FS_I_SB(inode), cache_only && IS_ENCRYPTED(inode));
	if (!cache_only)
		set_page_dirty(page);
	f2fs_put_page(page, 1);
	return 0;
}
660
/*
 * Truncate all blocks of @inode past offset @from (block-aligned up).
 * Handles inline-data inodes, trims the partially covered dnode via
 * f2fs_truncate_data_blocks_range(), drops the remaining node blocks with
 * f2fs_truncate_inode_blocks(), and finally zeroes the partial EOF page.
 * @lock selects whether f2fs_lock_op() is taken here.
 */
int f2fs_do_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	pgoff_t free_from;
	int count = 0, err = 0;
	struct page *ipage;
	bool truncate_page = false;

	trace_f2fs_truncate_blocks_enter(inode, from);

	free_from = (pgoff_t)F2FS_BLK_ALIGN(from);

	/* nothing beyond the maximum file size can exist on disk */
	if (free_from >= max_file_blocks(inode))
		goto free_partial;

	if (lock)
		f2fs_lock_op(sbi);

	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		f2fs_truncate_inline_inode(inode, ipage, from);
		f2fs_put_page(ipage, 1);
		truncate_page = true;
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA);
	if (err) {
		if (err == -ENOENT)
			goto free_next;
		goto out;
	}

	count = ADDRS_PER_PAGE(dn.node_page, inode);

	count -= dn.ofs_in_node;
	f2fs_bug_on(sbi, count < 0);

	if (dn.ofs_in_node || IS_INODE(dn.node_page)) {
		f2fs_truncate_data_blocks_range(&dn, count);
		free_from += count;
	}

	f2fs_put_dnode(&dn);
free_next:
	err = f2fs_truncate_inode_blocks(inode, free_from);
out:
	if (lock)
		f2fs_unlock_op(sbi);
free_partial:
	/* lastly zero out the first data page */
	if (!err)
		err = truncate_partial_data_page(inode, from, truncate_page);

	trace_f2fs_truncate_blocks_exit(inode, err);
	return err;
}
725
/*
 * Wrapper around f2fs_do_truncate_blocks() that rounds the truncation
 * point up to a compression-cluster boundary for compressed files, then
 * truncates the remaining partial cluster separately.
 */
int f2fs_truncate_blocks(struct inode *inode, u64 from, bool lock)
{
	u64 free_from = from;
	int err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/*
	 * for compressed file, only support cluster size
	 * aligned truncation.
	 */
	if (f2fs_compressed_file(inode))
		free_from = round_up(from,
				F2FS_I(inode)->i_cluster_size << PAGE_SHIFT);
#endif

	err = f2fs_do_truncate_blocks(inode, free_from, lock);
	if (err)
		return err;

#ifdef CONFIG_F2FS_FS_COMPRESSION
	/* handle the partial cluster left between @from and the round-up */
	if (from != free_from) {
		err = f2fs_truncate_partial_cluster(inode, from, lock);
		if (err)
			return err;
	}
#endif

	return 0;
}
755
/*
 * Truncate @inode to its current i_size: converts inline data out if it no
 * longer fits, drops blocks past i_size, and updates mtime/ctime.  Only
 * regular files, directories and symlinks are truncated; other inode types
 * return 0.  Returns -EIO on checkpoint error or injected fault.
 */
int f2fs_truncate(struct inode *inode)
{
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
				S_ISLNK(inode->i_mode)))
		return 0;

	trace_f2fs_truncate(inode);

	/* fault-injection hook for testing truncate error paths */
	if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
		f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_TRUNCATE);
		return -EIO;
	}

	/* we should check inline_data size */
	if (!f2fs_may_inline_data(inode)) {
		err = f2fs_convert_inline_inode(inode);
		if (err)
			return err;
	}

	err = f2fs_truncate_blocks(inode, i_size_read(inode), true);
	if (err)
		return err;

	inode->i_mtime = inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, false);
	return 0;
}
789
/*
 * ->getattr entry point: fill @stat via generic_fillattr(), plus the f2fs
 * extras — creation time (STATX_BTIME) when the inode carries i_crtime,
 * the STATX_ATTR_* flags derived from i_flags/encryption/verity state, and
 * a block-count adjustment for inline data/dentries.
 */
int f2fs_getattr(const struct path *path, struct kstat *stat,
		u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_inode *ri;
	unsigned int flags;

	if (f2fs_has_extra_attr(inode) &&
			f2fs_sb_has_inode_crtime(F2FS_I_SB(inode)) &&
			F2FS_FITS_IN_INODE(ri, fi->i_extra_isize, i_crtime)) {
		stat->result_mask |= STATX_BTIME;
		stat->btime.tv_sec = fi->i_crtime.tv_sec;
		stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
	}

	flags = fi->i_flags;
	if (flags & F2FS_COMPR_FL)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (flags & F2FS_APPEND_FL)
		stat->attributes |= STATX_ATTR_APPEND;
	if (IS_ENCRYPTED(inode))
		stat->attributes |= STATX_ATTR_ENCRYPTED;
	if (flags & F2FS_IMMUTABLE_FL)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (flags & F2FS_NODUMP_FL)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (IS_VERITY(inode))
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
				  STATX_ATTR_APPEND |
				  STATX_ATTR_ENCRYPTED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP |
				  STATX_ATTR_VERITY);

	generic_fillattr(inode, stat);

	/* we need to show initial sectors used for inline_data/dentries */
	if ((S_ISREG(inode->i_mode) && f2fs_has_inline_data(inode)) ||
					f2fs_has_inline_dentry(inode))
		stat->blocks += (stat->size + 511) >> 9;

	return 0;
}
836
#ifdef CONFIG_F2FS_FS_POSIX_ACL
/*
 * Copy the validated iattr fields into @inode.  Like setattr_copy(), but
 * an ATTR_MODE change goes through set_acl_inode() so the pending mode is
 * staged for the ACL code (with S_ISGID cleared when the caller lacks
 * group membership/CAP_FSETID).
 */
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
	unsigned int ia_valid = attr->ia_valid;

	if (ia_valid & ATTR_UID)
		inode->i_uid = attr->ia_uid;
	if (ia_valid & ATTR_GID)
		inode->i_gid = attr->ia_gid;
	if (ia_valid & ATTR_ATIME)
		inode->i_atime = attr->ia_atime;
	if (ia_valid & ATTR_MTIME)
		inode->i_mtime = attr->ia_mtime;
	if (ia_valid & ATTR_CTIME)
		inode->i_ctime = attr->ia_ctime;
	if (ia_valid & ATTR_MODE) {
		umode_t mode = attr->ia_mode;

		if (!in_group_p(inode->i_gid) &&
			!capable_wrt_inode_uidgid(inode, CAP_FSETID))
			mode &= ~S_ISGID;
		set_acl_inode(inode, mode);
	}
}
#else
/* without POSIX ACL support, the generic helper is sufficient */
#define __setattr_copy setattr_copy
#endif
864
/*
 * ->setattr entry point.
 *
 * Rejects changes on immutable inodes and restricted changes on
 * append-only inodes, runs the VFS/fscrypt/fsverity preparation hooks,
 * transfers quota for uid/gid changes under lock_op(), performs size
 * changes (truncate on shrink) under i_gc_rwsem[WRITE] and i_mmap_sem,
 * copies the remaining attributes, and applies a mode change through
 * posix_acl_chmod().  Returns 0 or a negative errno.
 */
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;

	if (unlikely(IS_IMMUTABLE(inode)))
		return -EPERM;

	if (unlikely(IS_APPEND(inode) &&
			(attr->ia_valid & (ATTR_MODE | ATTR_UID |
					ATTR_GID | ATTR_TIMES_SET))))
		return -EPERM;

	/* size change on a compressed file needs a working backend */
	if ((attr->ia_valid & ATTR_SIZE) &&
		!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	err = fscrypt_prepare_setattr(dentry, attr);
	if (err)
		return err;

	err = fsverity_prepare_setattr(dentry, attr);
	if (err)
		return err;

	if (is_quota_modification(inode, attr)) {
		err = dquot_initialize(inode);
		if (err)
			return err;
	}
	if ((attr->ia_valid & ATTR_UID &&
		!uid_eq(attr->ia_uid, inode->i_uid)) ||
		(attr->ia_valid & ATTR_GID &&
		!gid_eq(attr->ia_gid, inode->i_gid))) {
		f2fs_lock_op(F2FS_I_SB(inode));
		err = dquot_transfer(inode, attr);
		if (err) {
			set_sbi_flag(F2FS_I_SB(inode),
					SBI_QUOTA_NEED_REPAIR);
			f2fs_unlock_op(F2FS_I_SB(inode));
			return err;
		}
		/*
		 * update uid/gid under lock_op(), so that dquot and inode can
		 * be updated atomically.
		 */
		if (attr->ia_valid & ATTR_UID)
			inode->i_uid = attr->ia_uid;
		if (attr->ia_valid & ATTR_GID)
			inode->i_gid = attr->ia_gid;
		f2fs_mark_inode_dirty_sync(inode, true);
		f2fs_unlock_op(F2FS_I_SB(inode));
	}

	if (attr->ia_valid & ATTR_SIZE) {
		loff_t old_size = i_size_read(inode);

		if (attr->ia_size > MAX_INLINE_DATA(inode)) {
			/*
			 * should convert inline inode before i_size_write to
			 * keep smaller than inline_data size with inline flag.
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				return err;
		}

		down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		down_write(&F2FS_I(inode)->i_mmap_sem);

		truncate_setsize(inode, attr->ia_size);

		if (attr->ia_size <= old_size)
			err = f2fs_truncate(inode);
		/*
		 * do not trim all blocks after i_size if target size is
		 * larger than i_size.
		 */
		up_write(&F2FS_I(inode)->i_mmap_sem);
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		if (err)
			return err;

		spin_lock(&F2FS_I(inode)->i_size_lock);
		inode->i_mtime = inode->i_ctime = current_time(inode);
		F2FS_I(inode)->last_disk_size = i_size_read(inode);
		spin_unlock(&F2FS_I(inode)->i_size_lock);
	}

	__setattr_copy(inode, attr);

	if (attr->ia_valid & ATTR_MODE) {
		err = posix_acl_chmod(inode, f2fs_get_inode_mode(inode));

		if (is_inode_flag_set(inode, FI_ACL_MODE)) {
			if (!err)
				inode->i_mode = F2FS_I(inode)->i_acl_mode;
			clear_inode_flag(inode, FI_ACL_MODE);
		}
	}

	/* file size may changed here */
	f2fs_mark_inode_dirty_sync(inode, true);

	/* inode change will produce dirty node pages flushed by checkpoint */
	f2fs_balance_fs(F2FS_I_SB(inode), true);

	return err;
}
981
982const struct inode_operations f2fs_file_inode_operations = {
983 .getattr = f2fs_getattr,
984 .setattr = f2fs_setattr,
985 .get_acl = f2fs_get_acl,
a6dda0e6 986 .set_acl = f2fs_set_acl,
fbfa2cc5 987 .listxattr = f2fs_listxattr,
9ab70134 988 .fiemap = f2fs_fiemap,
fbfa2cc5
JK
989};
990
6394328a 991static int fill_zero(struct inode *inode, pgoff_t index,
fbfa2cc5
JK
992 loff_t start, loff_t len)
993{
4081363f 994 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
fbfa2cc5
JK
995 struct page *page;
996
997 if (!len)
6394328a 998 return 0;
fbfa2cc5 999
2c4db1a6 1000 f2fs_balance_fs(sbi, true);
bd43df02 1001
e479556b 1002 f2fs_lock_op(sbi);
4d57b86d 1003 page = f2fs_get_new_data_page(inode, NULL, index, false);
e479556b 1004 f2fs_unlock_op(sbi);
fbfa2cc5 1005
6394328a
CY
1006 if (IS_ERR(page))
1007 return PTR_ERR(page);
1008
bae0ee7a 1009 f2fs_wait_on_page_writeback(page, DATA, true, true);
6394328a
CY
1010 zero_user(page, start, len);
1011 set_page_dirty(page);
1012 f2fs_put_page(page, 1);
1013 return 0;
fbfa2cc5
JK
1014}
1015
4d57b86d 1016int f2fs_truncate_hole(struct inode *inode, pgoff_t pg_start, pgoff_t pg_end)
fbfa2cc5 1017{
fbfa2cc5
JK
1018 int err;
1019
ea58711e 1020 while (pg_start < pg_end) {
fbfa2cc5 1021 struct dnode_of_data dn;
ea58711e 1022 pgoff_t end_offset, count;
9eaeba70 1023
fbfa2cc5 1024 set_new_dnode(&dn, inode, NULL, NULL, 0);
4d57b86d 1025 err = f2fs_get_dnode_of_data(&dn, pg_start, LOOKUP_NODE);
fbfa2cc5 1026 if (err) {
ea58711e 1027 if (err == -ENOENT) {
4d57b86d
CY
1028 pg_start = f2fs_get_next_page_offset(&dn,
1029 pg_start);
fbfa2cc5 1030 continue;
ea58711e 1031 }
fbfa2cc5
JK
1032 return err;
1033 }
1034
81ca7350 1035 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
ea58711e
CY
1036 count = min(end_offset - dn.ofs_in_node, pg_end - pg_start);
1037
1038 f2fs_bug_on(F2FS_I_SB(inode), count == 0 || count > end_offset);
1039
4d57b86d 1040 f2fs_truncate_data_blocks_range(&dn, count);
fbfa2cc5 1041 f2fs_put_dnode(&dn);
ea58711e
CY
1042
1043 pg_start += count;
fbfa2cc5
JK
1044 }
1045 return 0;
1046}
1047
a66c7b2f 1048static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
fbfa2cc5
JK
1049{
1050 pgoff_t pg_start, pg_end;
1051 loff_t off_start, off_end;
b9d777b8 1052 int ret;
fbfa2cc5 1053
b9d777b8
JK
1054 ret = f2fs_convert_inline_inode(inode);
1055 if (ret)
1056 return ret;
9ffe0fb5 1057
09cbfeaf
KS
1058 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1059 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
fbfa2cc5 1060
09cbfeaf
KS
1061 off_start = offset & (PAGE_SIZE - 1);
1062 off_end = (offset + len) & (PAGE_SIZE - 1);
fbfa2cc5
JK
1063
1064 if (pg_start == pg_end) {
6394328a 1065 ret = fill_zero(inode, pg_start, off_start,
fbfa2cc5 1066 off_end - off_start);
6394328a
CY
1067 if (ret)
1068 return ret;
fbfa2cc5 1069 } else {
6394328a
CY
1070 if (off_start) {
1071 ret = fill_zero(inode, pg_start++, off_start,
09cbfeaf 1072 PAGE_SIZE - off_start);
6394328a
CY
1073 if (ret)
1074 return ret;
1075 }
1076 if (off_end) {
1077 ret = fill_zero(inode, pg_end, 0, off_end);
1078 if (ret)
1079 return ret;
1080 }
fbfa2cc5
JK
1081
1082 if (pg_start < pg_end) {
1083 struct address_space *mapping = inode->i_mapping;
1084 loff_t blk_start, blk_end;
4081363f 1085 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1127a3d4 1086
2c4db1a6 1087 f2fs_balance_fs(sbi, true);
fbfa2cc5 1088
09cbfeaf
KS
1089 blk_start = (loff_t)pg_start << PAGE_SHIFT;
1090 blk_end = (loff_t)pg_end << PAGE_SHIFT;
a33c1502 1091
a33c1502 1092 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
6f8d4455 1093 down_write(&F2FS_I(inode)->i_mmap_sem);
a33c1502 1094
fbfa2cc5
JK
1095 truncate_inode_pages_range(mapping, blk_start,
1096 blk_end - 1);
39936837 1097
e479556b 1098 f2fs_lock_op(sbi);
4d57b86d 1099 ret = f2fs_truncate_hole(inode, pg_start, pg_end);
e479556b 1100 f2fs_unlock_op(sbi);
a33c1502 1101
5a3a2d83 1102 up_write(&F2FS_I(inode)->i_mmap_sem);
6f8d4455 1103 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
fbfa2cc5
JK
1104 }
1105 }
1106
fbfa2cc5
JK
1107 return ret;
1108}
1109
0a2aa8fb
JK
1110static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
1111 int *do_replace, pgoff_t off, pgoff_t len)
b4ace337
CY
1112{
1113 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1114 struct dnode_of_data dn;
0a2aa8fb 1115 int ret, done, i;
ecbaa406 1116
0a2aa8fb 1117next_dnode:
6e2c64ad 1118 set_new_dnode(&dn, inode, NULL, NULL, 0);
4d57b86d 1119 ret = f2fs_get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
6e2c64ad
JK
1120 if (ret && ret != -ENOENT) {
1121 return ret;
1122 } else if (ret == -ENOENT) {
0a2aa8fb
JK
1123 if (dn.max_level == 0)
1124 return -ENOENT;
4c8ff709
CY
1125 done = min((pgoff_t)ADDRS_PER_BLOCK(inode) -
1126 dn.ofs_in_node, len);
0a2aa8fb
JK
1127 blkaddr += done;
1128 do_replace += done;
1129 goto next;
1130 }
1131
1132 done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
1133 dn.ofs_in_node, len);
1134 for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
a2ced1ce 1135 *blkaddr = f2fs_data_blkaddr(&dn);
93770ab7
CY
1136
1137 if (__is_valid_data_blkaddr(*blkaddr) &&
1138 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1139 DATA_GENERIC_ENHANCE)) {
1140 f2fs_put_dnode(&dn);
10f966bb 1141 return -EFSCORRUPTED;
93770ab7
CY
1142 }
1143
4d57b86d 1144 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
0a2aa8fb 1145
b0332a0f 1146 if (f2fs_lfs_mode(sbi)) {
0a2aa8fb 1147 f2fs_put_dnode(&dn);
fd114ab2 1148 return -EOPNOTSUPP;
0a2aa8fb
JK
1149 }
1150
6e2c64ad 1151 /* do not invalidate this block address */
f28b3434 1152 f2fs_update_data_blkaddr(&dn, NULL_ADDR);
0a2aa8fb 1153 *do_replace = 1;
b4ace337 1154 }
6e2c64ad 1155 }
0a2aa8fb
JK
1156 f2fs_put_dnode(&dn);
1157next:
1158 len -= done;
1159 off += done;
1160 if (len)
1161 goto next_dnode;
1162 return 0;
1163}
b4ace337 1164
0a2aa8fb
JK
1165static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
1166 int *do_replace, pgoff_t off, int len)
1167{
1168 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1169 struct dnode_of_data dn;
1170 int ret, i;
b4ace337 1171
0a2aa8fb
JK
1172 for (i = 0; i < len; i++, do_replace++, blkaddr++) {
1173 if (*do_replace == 0)
1174 continue;
b4ace337 1175
0a2aa8fb 1176 set_new_dnode(&dn, inode, NULL, NULL, 0);
4d57b86d 1177 ret = f2fs_get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
0a2aa8fb
JK
1178 if (ret) {
1179 dec_valid_block_count(sbi, inode, 1);
4d57b86d 1180 f2fs_invalidate_blocks(sbi, *blkaddr);
0a2aa8fb
JK
1181 } else {
1182 f2fs_update_data_blkaddr(&dn, *blkaddr);
36abef4e 1183 }
0a2aa8fb
JK
1184 f2fs_put_dnode(&dn);
1185 }
1186 return 0;
1187}
1188
1189static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
1190 block_t *blkaddr, int *do_replace,
1191 pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
1192{
1193 struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
1194 pgoff_t i = 0;
1195 int ret;
36abef4e 1196
0a2aa8fb
JK
1197 while (i < len) {
1198 if (blkaddr[i] == NULL_ADDR && !full) {
1199 i++;
1200 continue;
6e2c64ad 1201 }
b4ace337 1202
0a2aa8fb
JK
1203 if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
1204 struct dnode_of_data dn;
1205 struct node_info ni;
1206 size_t new_size;
1207 pgoff_t ilen;
b4ace337 1208
0a2aa8fb 1209 set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
4d57b86d 1210 ret = f2fs_get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
0a2aa8fb
JK
1211 if (ret)
1212 return ret;
b4ace337 1213
7735730d
CY
1214 ret = f2fs_get_node_info(sbi, dn.nid, &ni);
1215 if (ret) {
1216 f2fs_put_dnode(&dn);
1217 return ret;
1218 }
1219
0a2aa8fb
JK
1220 ilen = min((pgoff_t)
1221 ADDRS_PER_PAGE(dn.node_page, dst_inode) -
1222 dn.ofs_in_node, len - i);
1223 do {
a2ced1ce 1224 dn.data_blkaddr = f2fs_data_blkaddr(&dn);
4d57b86d 1225 f2fs_truncate_data_blocks_range(&dn, 1);
0a2aa8fb
JK
1226
1227 if (do_replace[i]) {
1228 f2fs_i_blocks_write(src_inode,
0abd675e 1229 1, false, false);
0a2aa8fb 1230 f2fs_i_blocks_write(dst_inode,
0abd675e 1231 1, true, false);
0a2aa8fb
JK
1232 f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
1233 blkaddr[i], ni.version, true, false);
1234
1235 do_replace[i] = 0;
1236 }
1237 dn.ofs_in_node++;
1238 i++;
1f0d5c91 1239 new_size = (loff_t)(dst + i) << PAGE_SHIFT;
0a2aa8fb
JK
1240 if (dst_inode->i_size < new_size)
1241 f2fs_i_size_write(dst_inode, new_size);
e87f7329 1242 } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
6e2c64ad 1243
0a2aa8fb
JK
1244 f2fs_put_dnode(&dn);
1245 } else {
1246 struct page *psrc, *pdst;
1247
4d57b86d
CY
1248 psrc = f2fs_get_lock_data_page(src_inode,
1249 src + i, true);
0a2aa8fb
JK
1250 if (IS_ERR(psrc))
1251 return PTR_ERR(psrc);
4d57b86d 1252 pdst = f2fs_get_new_data_page(dst_inode, NULL, dst + i,
0a2aa8fb
JK
1253 true);
1254 if (IS_ERR(pdst)) {
1255 f2fs_put_page(psrc, 1);
1256 return PTR_ERR(pdst);
1257 }
1258 f2fs_copy_page(psrc, pdst);
1259 set_page_dirty(pdst);
1260 f2fs_put_page(pdst, 1);
6e2c64ad 1261 f2fs_put_page(psrc, 1);
b4ace337 1262
4d57b86d
CY
1263 ret = f2fs_truncate_hole(src_inode,
1264 src + i, src + i + 1);
0a2aa8fb
JK
1265 if (ret)
1266 return ret;
1267 i++;
1268 }
6e2c64ad
JK
1269 }
1270 return 0;
0a2aa8fb 1271}
b4ace337 1272
0a2aa8fb
JK
1273static int __exchange_data_block(struct inode *src_inode,
1274 struct inode *dst_inode, pgoff_t src, pgoff_t dst,
363cad7f 1275 pgoff_t len, bool full)
0a2aa8fb
JK
1276{
1277 block_t *src_blkaddr;
1278 int *do_replace;
363cad7f 1279 pgoff_t olen;
0a2aa8fb
JK
1280 int ret;
1281
363cad7f 1282 while (len) {
d02a6e61 1283 olen = min((pgoff_t)4 * ADDRS_PER_BLOCK(src_inode), len);
0a2aa8fb 1284
628b3d14 1285 src_blkaddr = f2fs_kvzalloc(F2FS_I_SB(src_inode),
9d2a789c 1286 array_size(olen, sizeof(block_t)),
4f4460c0 1287 GFP_NOFS);
363cad7f
JK
1288 if (!src_blkaddr)
1289 return -ENOMEM;
0a2aa8fb 1290
628b3d14 1291 do_replace = f2fs_kvzalloc(F2FS_I_SB(src_inode),
9d2a789c 1292 array_size(olen, sizeof(int)),
4f4460c0 1293 GFP_NOFS);
363cad7f
JK
1294 if (!do_replace) {
1295 kvfree(src_blkaddr);
1296 return -ENOMEM;
1297 }
0a2aa8fb 1298
363cad7f
JK
1299 ret = __read_out_blkaddrs(src_inode, src_blkaddr,
1300 do_replace, src, olen);
1301 if (ret)
1302 goto roll_back;
0a2aa8fb 1303
363cad7f
JK
1304 ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
1305 do_replace, src, dst, olen, full);
1306 if (ret)
1307 goto roll_back;
1308
1309 src += olen;
1310 dst += olen;
1311 len -= olen;
1312
1313 kvfree(src_blkaddr);
1314 kvfree(do_replace);
1315 }
0a2aa8fb
JK
1316 return 0;
1317
1318roll_back:
9fd62605 1319 __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, olen);
0a2aa8fb
JK
1320 kvfree(src_blkaddr);
1321 kvfree(do_replace);
6e2c64ad
JK
1322 return ret;
1323}
b4ace337 1324
6f8d4455 1325static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
6e2c64ad
JK
1326{
1327 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
f91108b8 1328 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
6f8d4455
JK
1329 pgoff_t start = offset >> PAGE_SHIFT;
1330 pgoff_t end = (offset + len) >> PAGE_SHIFT;
0a2aa8fb 1331 int ret;
6e2c64ad 1332
0a2aa8fb 1333 f2fs_balance_fs(sbi, true);
5f281fab 1334
6f8d4455
JK
1335 /* avoid gc operation during block exchange */
1336 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1337 down_write(&F2FS_I(inode)->i_mmap_sem);
5f281fab 1338
6f8d4455
JK
1339 f2fs_lock_op(sbi);
1340 f2fs_drop_extent_tree(inode);
1341 truncate_pagecache(inode, offset);
0a2aa8fb
JK
1342 ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
1343 f2fs_unlock_op(sbi);
6f8d4455
JK
1344
1345 up_write(&F2FS_I(inode)->i_mmap_sem);
1346 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
b4ace337
CY
1347 return ret;
1348}
1349
1350static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
1351{
b4ace337
CY
1352 loff_t new_size;
1353 int ret;
1354
b4ace337
CY
1355 if (offset + len >= i_size_read(inode))
1356 return -EINVAL;
1357
1358 /* collapse range should be aligned to block size of f2fs. */
1359 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1360 return -EINVAL;
1361
b9d777b8
JK
1362 ret = f2fs_convert_inline_inode(inode);
1363 if (ret)
1364 return ret;
97a7b2c2 1365
b4ace337
CY
1366 /* write out all dirty pages from offset */
1367 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1368 if (ret)
6f8d4455 1369 return ret;
b4ace337 1370
6f8d4455 1371 ret = f2fs_do_collapse(inode, offset, len);
b4ace337 1372 if (ret)
6f8d4455 1373 return ret;
b4ace337 1374
6e2c64ad 1375 /* write out all moved pages, if possible */
6f8d4455 1376 down_write(&F2FS_I(inode)->i_mmap_sem);
6e2c64ad
JK
1377 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1378 truncate_pagecache(inode, offset);
1379
b4ace337 1380 new_size = i_size_read(inode) - len;
c42d28ce 1381 ret = f2fs_truncate_blocks(inode, new_size, true);
6f8d4455 1382 up_write(&F2FS_I(inode)->i_mmap_sem);
b4ace337 1383 if (!ret)
fc9581c8 1384 f2fs_i_size_write(inode, new_size);
b4ace337
CY
1385 return ret;
1386}
1387
6e961949
CY
1388static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start,
1389 pgoff_t end)
1390{
1391 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1392 pgoff_t index = start;
1393 unsigned int ofs_in_node = dn->ofs_in_node;
1394 blkcnt_t count = 0;
1395 int ret;
1396
1397 for (; index < end; index++, dn->ofs_in_node++) {
a2ced1ce 1398 if (f2fs_data_blkaddr(dn) == NULL_ADDR)
6e961949
CY
1399 count++;
1400 }
1401
1402 dn->ofs_in_node = ofs_in_node;
4d57b86d 1403 ret = f2fs_reserve_new_blocks(dn, count);
6e961949
CY
1404 if (ret)
1405 return ret;
1406
1407 dn->ofs_in_node = ofs_in_node;
1408 for (index = start; index < end; index++, dn->ofs_in_node++) {
a2ced1ce 1409 dn->data_blkaddr = f2fs_data_blkaddr(dn);
6e961949 1410 /*
4d57b86d 1411 * f2fs_reserve_new_blocks will not guarantee entire block
6e961949
CY
1412 * allocation.
1413 */
1414 if (dn->data_blkaddr == NULL_ADDR) {
1415 ret = -ENOSPC;
1416 break;
1417 }
1418 if (dn->data_blkaddr != NEW_ADDR) {
4d57b86d 1419 f2fs_invalidate_blocks(sbi, dn->data_blkaddr);
6e961949 1420 dn->data_blkaddr = NEW_ADDR;
4d57b86d 1421 f2fs_set_data_blkaddr(dn);
6e961949
CY
1422 }
1423 }
1424
1425 f2fs_update_extent_cache_range(dn, start, 0, index - start);
1426
1427 return ret;
1428}
1429
75cd4e09
CY
1430static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
1431 int mode)
1432{
1433 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1434 struct address_space *mapping = inode->i_mapping;
1435 pgoff_t index, pg_start, pg_end;
1436 loff_t new_size = i_size_read(inode);
1437 loff_t off_start, off_end;
1438 int ret = 0;
1439
75cd4e09
CY
1440 ret = inode_newsize_ok(inode, (len + offset));
1441 if (ret)
1442 return ret;
1443
b9d777b8
JK
1444 ret = f2fs_convert_inline_inode(inode);
1445 if (ret)
1446 return ret;
75cd4e09
CY
1447
1448 ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
1449 if (ret)
6f8d4455 1450 return ret;
75cd4e09 1451
09cbfeaf
KS
1452 pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
1453 pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
75cd4e09 1454
09cbfeaf
KS
1455 off_start = offset & (PAGE_SIZE - 1);
1456 off_end = (offset + len) & (PAGE_SIZE - 1);
75cd4e09
CY
1457
1458 if (pg_start == pg_end) {
6394328a
CY
1459 ret = fill_zero(inode, pg_start, off_start,
1460 off_end - off_start);
1461 if (ret)
6f8d4455 1462 return ret;
6394328a 1463
75cd4e09
CY
1464 new_size = max_t(loff_t, new_size, offset + len);
1465 } else {
1466 if (off_start) {
6394328a 1467 ret = fill_zero(inode, pg_start++, off_start,
09cbfeaf 1468 PAGE_SIZE - off_start);
6394328a 1469 if (ret)
6f8d4455 1470 return ret;
6394328a 1471
75cd4e09 1472 new_size = max_t(loff_t, new_size,
09cbfeaf 1473 (loff_t)pg_start << PAGE_SHIFT);
75cd4e09
CY
1474 }
1475
6e961949 1476 for (index = pg_start; index < pg_end;) {
75cd4e09 1477 struct dnode_of_data dn;
6e961949
CY
1478 unsigned int end_offset;
1479 pgoff_t end;
75cd4e09 1480
c7079853 1481 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
6f8d4455 1482 down_write(&F2FS_I(inode)->i_mmap_sem);
c7079853
CY
1483
1484 truncate_pagecache_range(inode,
1485 (loff_t)index << PAGE_SHIFT,
1486 ((loff_t)pg_end << PAGE_SHIFT) - 1);
1487
75cd4e09
CY
1488 f2fs_lock_op(sbi);
1489
6e961949 1490 set_new_dnode(&dn, inode, NULL, NULL, 0);
4d57b86d 1491 ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
75cd4e09
CY
1492 if (ret) {
1493 f2fs_unlock_op(sbi);
6f8d4455 1494 up_write(&F2FS_I(inode)->i_mmap_sem);
c7079853 1495 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
75cd4e09
CY
1496 goto out;
1497 }
1498
6e961949
CY
1499 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
1500 end = min(pg_end, end_offset - dn.ofs_in_node + index);
1501
1502 ret = f2fs_do_zero_range(&dn, index, end);
75cd4e09 1503 f2fs_put_dnode(&dn);
c7079853 1504
75cd4e09 1505 f2fs_unlock_op(sbi);
6f8d4455 1506 up_write(&F2FS_I(inode)->i_mmap_sem);
c7079853 1507 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
9434fcde
CY
1508
1509 f2fs_balance_fs(sbi, dn.node_changed);
1510
6e961949
CY
1511 if (ret)
1512 goto out;
75cd4e09 1513
6e961949 1514 index = end;
75cd4e09 1515 new_size = max_t(loff_t, new_size,
6e961949 1516 (loff_t)index << PAGE_SHIFT);
75cd4e09
CY
1517 }
1518
1519 if (off_end) {
6394328a
CY
1520 ret = fill_zero(inode, pg_end, 0, off_end);
1521 if (ret)
1522 goto out;
1523
75cd4e09
CY
1524 new_size = max_t(loff_t, new_size, offset + len);
1525 }
1526 }
1527
1528out:
17cd07ae
CY
1529 if (new_size > i_size_read(inode)) {
1530 if (mode & FALLOC_FL_KEEP_SIZE)
1531 file_set_keep_isize(inode);
1532 else
1533 f2fs_i_size_write(inode, new_size);
1534 }
75cd4e09
CY
1535 return ret;
1536}
1537
f62185d0
CY
1538static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1539{
1540 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
0a2aa8fb 1541 pgoff_t nr, pg_start, pg_end, delta, idx;
f62185d0 1542 loff_t new_size;
6e2c64ad 1543 int ret = 0;
f62185d0 1544
f62185d0 1545 new_size = i_size_read(inode) + len;
46e82fb1
KM
1546 ret = inode_newsize_ok(inode, new_size);
1547 if (ret)
1548 return ret;
f62185d0
CY
1549
1550 if (offset >= i_size_read(inode))
1551 return -EINVAL;
1552
1553 /* insert range should be aligned to block size of f2fs. */
1554 if (offset & (F2FS_BLKSIZE - 1) || len & (F2FS_BLKSIZE - 1))
1555 return -EINVAL;
1556
b9d777b8
JK
1557 ret = f2fs_convert_inline_inode(inode);
1558 if (ret)
1559 return ret;
97a7b2c2 1560
2c4db1a6 1561 f2fs_balance_fs(sbi, true);
2a340760 1562
5a3a2d83 1563 down_write(&F2FS_I(inode)->i_mmap_sem);
c42d28ce 1564 ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
6f8d4455 1565 up_write(&F2FS_I(inode)->i_mmap_sem);
f62185d0 1566 if (ret)
6f8d4455 1567 return ret;
f62185d0
CY
1568
1569 /* write out all dirty pages from offset */
1570 ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1571 if (ret)
6f8d4455 1572 return ret;
f62185d0 1573
09cbfeaf
KS
1574 pg_start = offset >> PAGE_SHIFT;
1575 pg_end = (offset + len) >> PAGE_SHIFT;
f62185d0 1576 delta = pg_end - pg_start;
f91108b8 1577 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
0a2aa8fb 1578
6f8d4455
JK
1579 /* avoid gc operation during block exchange */
1580 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
1581 down_write(&F2FS_I(inode)->i_mmap_sem);
1582 truncate_pagecache(inode, offset);
1583
0a2aa8fb
JK
1584 while (!ret && idx > pg_start) {
1585 nr = idx - pg_start;
1586 if (nr > delta)
1587 nr = delta;
1588 idx -= nr;
f62185d0 1589
f62185d0 1590 f2fs_lock_op(sbi);
5f281fab
JK
1591 f2fs_drop_extent_tree(inode);
1592
0a2aa8fb
JK
1593 ret = __exchange_data_block(inode, inode, idx,
1594 idx + delta, nr, false);
f62185d0
CY
1595 f2fs_unlock_op(sbi);
1596 }
6f8d4455
JK
1597 up_write(&F2FS_I(inode)->i_mmap_sem);
1598 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f62185d0 1599
6e2c64ad 1600 /* write out all moved pages, if possible */
6f8d4455 1601 down_write(&F2FS_I(inode)->i_mmap_sem);
6e2c64ad
JK
1602 filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
1603 truncate_pagecache(inode, offset);
6f8d4455 1604 up_write(&F2FS_I(inode)->i_mmap_sem);
6e2c64ad
JK
1605
1606 if (!ret)
fc9581c8 1607 f2fs_i_size_write(inode, new_size);
f62185d0
CY
1608 return ret;
1609}
1610
fbfa2cc5
JK
1611static int expand_inode_data(struct inode *inode, loff_t offset,
1612 loff_t len, int mode)
1613{
4081363f 1614 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
d5097be5 1615 struct f2fs_map_blocks map = { .m_next_pgofs = NULL,
f9d6d059
CY
1616 .m_next_extent = NULL, .m_seg_type = NO_CHECK_TYPE,
1617 .m_may_create = true };
e12dd7bd 1618 pgoff_t pg_end;
fbfa2cc5 1619 loff_t new_size = i_size_read(inode);
e12dd7bd 1620 loff_t off_end;
a7de6086 1621 int err;
fbfa2cc5 1622
a7de6086
JK
1623 err = inode_newsize_ok(inode, (len + offset));
1624 if (err)
1625 return err;
fbfa2cc5 1626
a7de6086
JK
1627 err = f2fs_convert_inline_inode(inode);
1628 if (err)
1629 return err;
9e09fc85 1630
2c4db1a6 1631 f2fs_balance_fs(sbi, true);
2a340760 1632
e12dd7bd 1633 pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT;
09cbfeaf 1634 off_end = (offset + len) & (PAGE_SIZE - 1);
fbfa2cc5 1635
e12dd7bd
JK
1636 map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT;
1637 map.m_len = pg_end - map.m_lblk;
1638 if (off_end)
1639 map.m_len++;
ead43275 1640
f5a53edc
JK
1641 if (!map.m_len)
1642 return 0;
1643
1644 if (f2fs_is_pinned_file(inode)) {
1645 block_t len = (map.m_len >> sbi->log_blocks_per_seg) <<
1646 sbi->log_blocks_per_seg;
1647 block_t done = 0;
1648
1649 if (map.m_len % sbi->blocks_per_seg)
1650 len += sbi->blocks_per_seg;
1651
1652 map.m_len = sbi->blocks_per_seg;
1653next_alloc:
1654 if (has_not_enough_free_secs(sbi, 0,
1655 GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) {
fb24fea7 1656 down_write(&sbi->gc_lock);
f5a53edc
JK
1657 err = f2fs_gc(sbi, true, false, NULL_SEGNO);
1658 if (err && err != -ENODATA && err != -EAGAIN)
1659 goto out_err;
1660 }
1661
1662 down_write(&sbi->pin_sem);
fd612648
DJ
1663
1664 f2fs_lock_op(sbi);
d0b9e42a 1665 f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA_PINNED);
fd612648
DJ
1666 f2fs_unlock_op(sbi);
1667
d0b9e42a 1668 map.m_seg_type = CURSEG_COLD_DATA_PINNED;
f5a53edc 1669 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
d0b9e42a 1670
f5a53edc 1671 up_write(&sbi->pin_sem);
cad3836f 1672
f5a53edc
JK
1673 done += map.m_len;
1674 len -= map.m_len;
1675 map.m_lblk += map.m_len;
1676 if (!err && len)
1677 goto next_alloc;
1678
1679 map.m_len = done;
1680 } else {
1681 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO);
1682 }
1683out_err:
a7de6086 1684 if (err) {
e12dd7bd 1685 pgoff_t last_off;
fbfa2cc5 1686
e12dd7bd 1687 if (!map.m_len)
a7de6086 1688 return err;
98397ff3 1689
e12dd7bd
JK
1690 last_off = map.m_lblk + map.m_len - 1;
1691
1692 /* update new size to the failed position */
1061fd48 1693 new_size = (last_off == pg_end) ? offset + len :
e12dd7bd
JK
1694 (loff_t)(last_off + 1) << PAGE_SHIFT;
1695 } else {
1696 new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
fbfa2cc5
JK
1697 }
1698
e8ed90a6
CY
1699 if (new_size > i_size_read(inode)) {
1700 if (mode & FALLOC_FL_KEEP_SIZE)
1701 file_set_keep_isize(inode);
1702 else
1703 f2fs_i_size_write(inode, new_size);
1704 }
fbfa2cc5 1705
a7de6086 1706 return err;
fbfa2cc5
JK
1707}
1708
1709static long f2fs_fallocate(struct file *file, int mode,
1710 loff_t offset, loff_t len)
1711{
6131ffaa 1712 struct inode *inode = file_inode(file);
587c0a42 1713 long ret = 0;
fbfa2cc5 1714
1f227a3e
JK
1715 if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
1716 return -EIO;
00e09c0b
CY
1717 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(inode)))
1718 return -ENOSPC;
4c8ff709
CY
1719 if (!f2fs_is_compress_backend_ready(inode))
1720 return -EOPNOTSUPP;
1f227a3e 1721
c998012b
CY
1722 /* f2fs only support ->fallocate for regular file */
1723 if (!S_ISREG(inode->i_mode))
1724 return -EINVAL;
1725
62230e0d 1726 if (IS_ENCRYPTED(inode) &&
f62185d0 1727 (mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
fcc85a4d
JK
1728 return -EOPNOTSUPP;
1729
4c8ff709
CY
1730 if (f2fs_compressed_file(inode) &&
1731 (mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
1732 FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
1733 return -EOPNOTSUPP;
1734
b4ace337 1735 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
f62185d0
CY
1736 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |
1737 FALLOC_FL_INSERT_RANGE))
fbfa2cc5
JK
1738 return -EOPNOTSUPP;
1739
5955102c 1740 inode_lock(inode);
3375f696 1741
587c0a42
TY
1742 if (mode & FALLOC_FL_PUNCH_HOLE) {
1743 if (offset >= inode->i_size)
1744 goto out;
1745
a66c7b2f 1746 ret = punch_hole(inode, offset, len);
b4ace337
CY
1747 } else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
1748 ret = f2fs_collapse_range(inode, offset, len);
75cd4e09
CY
1749 } else if (mode & FALLOC_FL_ZERO_RANGE) {
1750 ret = f2fs_zero_range(inode, offset, len, mode);
f62185d0
CY
1751 } else if (mode & FALLOC_FL_INSERT_RANGE) {
1752 ret = f2fs_insert_range(inode, offset, len);
b4ace337 1753 } else {
fbfa2cc5 1754 ret = expand_inode_data(inode, offset, len, mode);
b4ace337 1755 }
fbfa2cc5 1756
3af60a49 1757 if (!ret) {
078cd827 1758 inode->i_mtime = inode->i_ctime = current_time(inode);
7c45729a 1759 f2fs_mark_inode_dirty_sync(inode, false);
d0239e1b 1760 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3af60a49 1761 }
3375f696 1762
587c0a42 1763out:
5955102c 1764 inode_unlock(inode);
3375f696 1765
c01e2853 1766 trace_f2fs_fallocate(inode, mode, offset, len, ret);
fbfa2cc5
JK
1767 return ret;
1768}
1769
1e84371f
JK
1770static int f2fs_release_file(struct inode *inode, struct file *filp)
1771{
de5307e4
JK
1772 /*
1773 * f2fs_relase_file is called at every close calls. So we should
1774 * not drop any inmemory pages by close called by other process.
1775 */
1776 if (!(filp->f_mode & FMODE_WRITE) ||
1777 atomic_read(&inode->i_writecount) != 1)
1778 return 0;
1779
1e84371f
JK
1780 /* some remained atomic pages should discarded */
1781 if (f2fs_is_atomic_file(inode))
4d57b86d 1782 f2fs_drop_inmem_pages(inode);
1e84371f 1783 if (f2fs_is_volatile_file(inode)) {
91942321 1784 set_inode_flag(inode, FI_DROP_CACHE);
1e84371f 1785 filemap_fdatawrite(inode->i_mapping);
91942321 1786 clear_inode_flag(inode, FI_DROP_CACHE);
dfa74280
CY
1787 clear_inode_flag(inode, FI_VOLATILE_FILE);
1788 stat_dec_volatile_write(inode);
1e84371f
JK
1789 }
1790 return 0;
1791}
1792
7a10f017 1793static int f2fs_file_flush(struct file *file, fl_owner_t id)
fbfa2cc5 1794{
7a10f017
JK
1795 struct inode *inode = file_inode(file);
1796
1797 /*
1798 * If the process doing a transaction is crashed, we should do
1799 * roll-back. Otherwise, other reader/write can see corrupted database
1800 * until all the writers close its file. Since this should be done
1801 * before dropping file lock, it needs to do in ->flush.
1802 */
1803 if (f2fs_is_atomic_file(inode) &&
1804 F2FS_I(inode)->inmem_task == current)
4d57b86d 1805 f2fs_drop_inmem_pages(inode);
7a10f017 1806 return 0;
fbfa2cc5
JK
1807}
1808
36098557 1809static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
2c1d0305
CY
1810{
1811 struct f2fs_inode_info *fi = F2FS_I(inode);
99eabb91
JK
1812 u32 masked_flags = fi->i_flags & mask;
1813
1814 f2fs_bug_on(F2FS_I_SB(inode), (iflags & ~mask));
2c1d0305
CY
1815
1816 /* Is it quota file? Do not allow user to mess with it */
1817 if (IS_NOQUOTA(inode))
1818 return -EPERM;
1819
99eabb91 1820 if ((iflags ^ masked_flags) & F2FS_CASEFOLD_FL) {
2c2eb7a3
DR
1821 if (!f2fs_sb_has_casefold(F2FS_I_SB(inode)))
1822 return -EOPNOTSUPP;
1823 if (!f2fs_empty_dir(inode))
1824 return -ENOTEMPTY;
1825 }
1826
4c8ff709
CY
1827 if (iflags & (F2FS_COMPR_FL | F2FS_NOCOMP_FL)) {
1828 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
1829 return -EOPNOTSUPP;
1830 if ((iflags & F2FS_COMPR_FL) && (iflags & F2FS_NOCOMP_FL))
1831 return -EINVAL;
1832 }
1833
99eabb91 1834 if ((iflags ^ masked_flags) & F2FS_COMPR_FL) {
aa576970 1835 if (masked_flags & F2FS_COMPR_FL) {
78134d03 1836 if (!f2fs_disable_compressed_file(inode))
2536ac68
CY
1837 return -EINVAL;
1838 }
4c8ff709
CY
1839 if (iflags & F2FS_NOCOMP_FL)
1840 return -EINVAL;
1841 if (iflags & F2FS_COMPR_FL) {
4c8ff709
CY
1842 if (!f2fs_may_compress(inode))
1843 return -EINVAL;
519a5a2f
CY
1844 if (S_ISREG(inode->i_mode) && inode->i_size)
1845 return -EINVAL;
4c8ff709
CY
1846
1847 set_compress_context(inode);
1848 }
1849 }
99eabb91
JK
1850 if ((iflags ^ masked_flags) & F2FS_NOCOMP_FL) {
1851 if (masked_flags & F2FS_COMPR_FL)
4c8ff709
CY
1852 return -EINVAL;
1853 }
1854
d5e5efa2 1855 fi->i_flags = iflags | (fi->i_flags & ~mask);
4c8ff709
CY
1856 f2fs_bug_on(F2FS_I_SB(inode), (fi->i_flags & F2FS_COMPR_FL) &&
1857 (fi->i_flags & F2FS_NOCOMP_FL));
2c1d0305 1858
59c84408 1859 if (fi->i_flags & F2FS_PROJINHERIT_FL)
2c1d0305
CY
1860 set_inode_flag(inode, FI_PROJ_INHERIT);
1861 else
1862 clear_inode_flag(inode, FI_PROJ_INHERIT);
1863
1864 inode->i_ctime = current_time(inode);
1865 f2fs_set_inode_flags(inode);
b32e0190 1866 f2fs_mark_inode_dirty_sync(inode, true);
2c1d0305
CY
1867 return 0;
1868}
1869
36098557
EB
1870/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1871
1872/*
1873 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1874 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1875 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1876 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1877 */
1878
1879static const struct {
1880 u32 iflag;
1881 u32 fsflag;
1882} f2fs_fsflags_map[] = {
4c8ff709 1883 { F2FS_COMPR_FL, FS_COMPR_FL },
36098557
EB
1884 { F2FS_SYNC_FL, FS_SYNC_FL },
1885 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1886 { F2FS_APPEND_FL, FS_APPEND_FL },
1887 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1888 { F2FS_NOATIME_FL, FS_NOATIME_FL },
4c8ff709 1889 { F2FS_NOCOMP_FL, FS_NOCOMP_FL },
36098557
EB
1890 { F2FS_INDEX_FL, FS_INDEX_FL },
1891 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1892 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
2c2eb7a3 1893 { F2FS_CASEFOLD_FL, FS_CASEFOLD_FL },
36098557
EB
1894};
1895
1896#define F2FS_GETTABLE_FS_FL ( \
4c8ff709 1897 FS_COMPR_FL | \
36098557
EB
1898 FS_SYNC_FL | \
1899 FS_IMMUTABLE_FL | \
1900 FS_APPEND_FL | \
1901 FS_NODUMP_FL | \
1902 FS_NOATIME_FL | \
4c8ff709 1903 FS_NOCOMP_FL | \
36098557
EB
1904 FS_INDEX_FL | \
1905 FS_DIRSYNC_FL | \
1906 FS_PROJINHERIT_FL | \
1907 FS_ENCRYPT_FL | \
1908 FS_INLINE_DATA_FL | \
95ae251f 1909 FS_NOCOW_FL | \
fbc246a1 1910 FS_VERITY_FL | \
2c2eb7a3 1911 FS_CASEFOLD_FL)
36098557
EB
1912
1913#define F2FS_SETTABLE_FS_FL ( \
4c8ff709 1914 FS_COMPR_FL | \
36098557
EB
1915 FS_SYNC_FL | \
1916 FS_IMMUTABLE_FL | \
1917 FS_APPEND_FL | \
1918 FS_NODUMP_FL | \
1919 FS_NOATIME_FL | \
4c8ff709 1920 FS_NOCOMP_FL | \
36098557 1921 FS_DIRSYNC_FL | \
2c2eb7a3
DR
1922 FS_PROJINHERIT_FL | \
1923 FS_CASEFOLD_FL)
36098557
EB
1924
1925/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1926static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1927{
1928 u32 fsflags = 0;
1929 int i;
1930
1931 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1932 if (iflags & f2fs_fsflags_map[i].iflag)
1933 fsflags |= f2fs_fsflags_map[i].fsflag;
1934
1935 return fsflags;
1936}
1937
1938/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1939static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1940{
1941 u32 iflags = 0;
1942 int i;
1943
1944 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1945 if (fsflags & f2fs_fsflags_map[i].fsflag)
1946 iflags |= f2fs_fsflags_map[i].iflag;
1947
1948 return iflags;
1949}
1950
/*
 * FS_IOC_GETFLAGS: report the inode's attribute flags to userspace.
 * Returns 0 or -EFAULT from put_user().
 */
static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);

	/* synthesize flags that are not tracked in fi->i_flags */
	if (IS_ENCRYPTED(inode))
		fsflags |= FS_ENCRYPT_FL;
	if (IS_VERITY(inode))
		fsflags |= FS_VERITY_FL;
	if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
		fsflags |= FS_INLINE_DATA_FL;
	/* pinned files are reported as NOCOW */
	if (is_inode_flag_set(inode, FI_PIN_FILE))
		fsflags |= FS_NOCOW_FL;

	fsflags &= F2FS_GETTABLE_FS_FL;

	return put_user(fsflags, (int __user *)arg);
}
1970
52656e6c
JK
/*
 * FS_IOC_SETFLAGS: update the inode's attribute flags.
 * Rejects flags outside the gettable set (-EOPNOTSUPP) and silently
 * ignores gettable-but-not-settable bits by masking to the settable set.
 */
static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	u32 fsflags, old_fsflags;
	u32 iflags;
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (get_user(fsflags, (int __user *)arg))
		return -EFAULT;

	if (fsflags & ~F2FS_GETTABLE_FS_FL)
		return -EOPNOTSUPP;
	fsflags &= F2FS_SETTABLE_FS_FL;

	/* drop flags that are not applicable to this file type */
	iflags = f2fs_fsflags_to_iflags(fsflags);
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	/* generic permission/transition checks (e.g. immutable/append) */
	old_fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
	ret = vfs_ioc_setflags_prepare(inode, old_fsflags, fsflags);
	if (ret)
		goto out;

	ret = f2fs_setflags_common(inode, iflags,
			f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
4b2fecc8 2011
d49f3e89
CY
/* FS_IOC_GETVERSION: copy the inode generation number to userspace. */
static int f2fs_ioc_getversion(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	return put_user(inode->i_generation, (int __user *)arg);
}
2018
88b88a66
JK
/*
 * F2FS_IOC_START_ATOMIC_WRITE: put a regular file into atomic-write mode.
 * Subsequent writes are buffered in-memory until commit.  Only the owner
 * (or a capable task) may do this, and O_DIRECT files are rejected.
 */
static int f2fs_ioc_start_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (filp->f_flags & O_DIRECT)
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	/* atomic writes are incompatible with compression on this file */
	f2fs_disable_compressed_file(inode);

	/* already atomic: succeed unless a revoke was requested meanwhile */
	if (f2fs_is_atomic_file(inode)) {
		if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
			ret = -EINVAL;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	/* block GC while flushing and flagging the inode */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	/*
	 * Should wait end_io to count F2FS_WB_CP_DATA correctly by
	 * f2fs_is_atomic_file.
	 */
	if (get_dirty_pages(inode))
		f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
			  inode->i_ino, get_dirty_pages(inode));
	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret) {
		up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
		goto out;
	}

	/* register the inode on the per-sb list of atomic files */
	spin_lock(&sbi->inode_lock[ATOMIC_FILE]);
	if (list_empty(&fi->inmem_ilist))
		list_add_tail(&fi->inmem_ilist, &sbi->inode_list[ATOMIC_FILE]);
	sbi->atomic_files++;
	spin_unlock(&sbi->inode_lock[ATOMIC_FILE]);

	/* add inode in inmem_list first and set atomic_file */
	set_inode_flag(inode, FI_ATOMIC_FILE);
	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	F2FS_I(inode)->inmem_task = current;
	stat_update_max_atomic_write(inode);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2087
/*
 * F2FS_IOC_COMMIT_ATOMIC_WRITE: flush the in-memory pages of an atomic
 * file to disk and end atomic mode.  For a non-atomic file this falls
 * back to a plain fsync.  Volatile files are rejected.
 */
static int f2fs_ioc_commit_atomic_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode)) {
		ret = -EINVAL;
		goto err_out;
	}

	if (f2fs_is_atomic_file(inode)) {
		/* write the buffered pages, then persist with fsync */
		ret = f2fs_commit_inmem_pages(inode);
		if (ret)
			goto err_out;

		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
		if (!ret)
			f2fs_drop_inmem_pages(inode);
	} else {
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 1, false);
	}
err_out:
	/* a pending revoke request overrides any success above */
	if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
		clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
		ret = -EINVAL;
	}
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2129
02a1335f
JK
/*
 * F2FS_IOC_START_VOLATILE_WRITE: mark a regular file volatile.
 * Idempotent — returns success if the file is already volatile.
 */
static int f2fs_ioc_start_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_volatile_file(inode))
		goto out;

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	stat_inc_volatile_write(inode);
	stat_update_max_volatile_write(inode);

	set_inode_flag(inode, FI_VOLATILE_FILE);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2164
1e84371f
JK
/*
 * F2FS_IOC_RELEASE_VOLATILE_WRITE: discard the data of a volatile file.
 * No-op (success) for non-volatile files.
 */
static int f2fs_ioc_release_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (!f2fs_is_volatile_file(inode))
		goto out;

	/* first block not yet on disk: just drop the partial page */
	if (!f2fs_is_first_block_written(inode)) {
		ret = truncate_partial_data_page(inode, 0, true);
		goto out;
	}

	/* otherwise punch out the first block */
	ret = punch_hole(inode, 0, F2FS_BLKSIZE);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
2193
/*
 * F2FS_IOC_ABORT_VOLATILE_WRITE: abort atomic and/or volatile mode.
 * Drops any buffered atomic pages; for a volatile file, clears the flag
 * and syncs the file.
 */
static int f2fs_ioc_abort_volatile_write(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	if (f2fs_is_atomic_file(inode))
		f2fs_drop_inmem_pages(inode);
	if (f2fs_is_volatile_file(inode)) {
		clear_inode_flag(inode, FI_VOLATILE_FILE);
		stat_dec_volatile_write(inode);
		ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
	}

	clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);

	inode_unlock(inode);

	mnt_drop_write_file(filp);
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return ret;
}
2224
1abff93d
JK
/*
 * F2FS_IOC_SHUTDOWN: stop the filesystem with a caller-selected level of
 * data safety (full freeze+sync, metadata sync, no sync, meta flush, or
 * mark-for-fsck).  Requires CAP_SYS_ADMIN.
 */
static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	__u32 in;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(in, (__u32 __user *)arg))
		return -EFAULT;

	/* FULLSYNC freezes the bdev itself, so it skips mnt_want_write */
	if (in != F2FS_GOING_DOWN_FULLSYNC) {
		ret = mnt_want_write_file(filp);
		if (ret) {
			/* on a read-only mount, still honor the shutdown */
			if (ret == -EROFS) {
				ret = 0;
				f2fs_stop_checkpoint(sbi, false);
				set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
				trace_f2fs_shutdown(sbi, in, ret);
			}
			return ret;
		}
	}

	switch (in) {
	case F2FS_GOING_DOWN_FULLSYNC:
		ret = freeze_bdev(sb->s_bdev);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		thaw_bdev(sb->s_bdev);
		break;
	case F2FS_GOING_DOWN_METASYNC:
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		if (ret)
			goto out;
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NOSYNC:
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_METAFLUSH:
		f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
		f2fs_stop_checkpoint(sbi, false);
		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
		break;
	case F2FS_GOING_DOWN_NEED_FSCK:
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		set_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
		set_sbi_flag(sbi, SBI_IS_DIRTY);
		/* do checkpoint only */
		ret = f2fs_sync_fs(sb, 1);
		/* NEED_FSCK does not stop GC/discard threads below */
		goto out;
	default:
		ret = -EINVAL;
		goto out;
	}

	f2fs_stop_gc_thread(sbi);
	f2fs_stop_discard_thread(sbi);

	f2fs_drop_discard_cmd(sbi);
	clear_opt(sbi, DISCARD);

	f2fs_update_time(sbi, REQ_TIME);
out:
	if (in != F2FS_GOING_DOWN_FULLSYNC)
		mnt_drop_write_file(filp);

	trace_f2fs_shutdown(sbi, in, ret);

	return ret;
}
2305
52656e6c
JK
/*
 * FITRIM: discard unused blocks in the requested range.  The trimmed
 * length is written back to userspace in range.len.
 */
static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	struct request_queue *q = bdev_get_queue(sb->s_bdev);
	struct fstrim_range range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!f2fs_hw_support_discard(F2FS_SB(sb)))
		return -EOPNOTSUPP;

	if (copy_from_user(&range, (struct fstrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	/* never trim below the device's discard granularity */
	range.minlen = max((unsigned int)range.minlen,
				q->limits.discard_granularity);
	ret = f2fs_trim_fs(F2FS_SB(sb), &range);
	mnt_drop_write_file(filp);
	if (ret < 0)
		return ret;

	if (copy_to_user((struct fstrim_range __user *)arg, &range,
				sizeof(range)))
		return -EFAULT;
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
	return 0;
}
2341
f424f664
JK
2342static bool uuid_is_nonzero(__u8 u[16])
2343{
2344 int i;
2345
2346 for (i = 0; i < 16; i++)
2347 if (u[i])
2348 return true;
2349 return false;
2350}
2351
/* FS_IOC_SET_ENCRYPTION_POLICY: delegate to fscrypt after feature check. */
static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);

	if (!f2fs_sb_has_encrypt(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);

	return fscrypt_ioctl_set_policy(filp, (const void __user *)arg);
}
2363
/* FS_IOC_GET_ENCRYPTION_POLICY: delegate to fscrypt after feature check. */
static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;
	return fscrypt_ioctl_get_policy(filp, (void __user *)arg);
}
2370
/*
 * F2FS_IOC_GET_ENCRYPTION_PWSALT: return the 16-byte password salt stored
 * in the superblock, generating and persisting one on first use.
 */
static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	int err;

	if (!f2fs_sb_has_encrypt(sbi))
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	/* sb_lock serializes salt generation and superblock commit */
	down_write(&sbi->sb_lock);

	if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt))
		goto got_it;

	/* update superblock with uuid */
	generate_random_uuid(sbi->raw_super->encrypt_pw_salt);

	err = f2fs_commit_super(sbi, false);
	if (err) {
		/* undo new data */
		memset(sbi->raw_super->encrypt_pw_salt, 0, 16);
		goto out_err;
	}
got_it:
	if (copy_to_user((__u8 __user *)arg, sbi->raw_super->encrypt_pw_salt,
									16))
		err = -EFAULT;
out_err:
	up_write(&sbi->sb_lock);
	mnt_drop_write_file(filp);
	return err;
}
2407
8ce589c7
EB
/* FS_IOC_GET_ENCRYPTION_POLICY_EX: delegate to fscrypt after feature check. */
static int f2fs_ioc_get_encryption_policy_ex(struct file *filp,
					     unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_policy_ex(filp, (void __user *)arg);
}
2416
/* FS_IOC_ADD_ENCRYPTION_KEY: delegate to fscrypt after feature check. */
static int f2fs_ioc_add_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_add_key(filp, (void __user *)arg);
}
2424
/* FS_IOC_REMOVE_ENCRYPTION_KEY: delegate to fscrypt after feature check. */
static int f2fs_ioc_remove_encryption_key(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key(filp, (void __user *)arg);
}
2432
/* FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS: delegate to fscrypt. */
static int f2fs_ioc_remove_encryption_key_all_users(struct file *filp,
						    unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_remove_key_all_users(filp, (void __user *)arg);
}
2441
/* FS_IOC_GET_ENCRYPTION_KEY_STATUS: delegate to fscrypt. */
static int f2fs_ioc_get_encryption_key_status(struct file *filp,
					      unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_key_status(filp, (void __user *)arg);
}
2450
ee446e1a
EB
/* FS_IOC_GET_ENCRYPTION_NONCE: delegate to fscrypt after feature check. */
static int f2fs_ioc_get_encryption_nonce(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_encrypt(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fscrypt_ioctl_get_nonce(filp, (void __user *)arg);
}
2458
c1c1b583
CY
/*
 * F2FS_IOC_GARBAGE_COLLECT: trigger one GC pass.  With sync==0 the call
 * fails with -EBUSY instead of waiting for the gc_lock.
 */
static int f2fs_ioc_gc(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	__u32 sync;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (get_user(sync, (__u32 __user *)arg))
		return -EFAULT;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	if (!sync) {
		/* best-effort: don't block behind an in-flight GC */
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
	} else {
		down_write(&sbi->gc_lock);
	}

	/* f2fs_gc() releases gc_lock internally */
	ret = f2fs_gc(sbi, sync, true, NULL_SEGNO);
out:
	mnt_drop_write_file(filp);
	return ret;
}
2493
34178b1b 2494static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range)
34dc77ad 2495{
34178b1b 2496 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
34dc77ad
JK
2497 u64 end;
2498 int ret;
2499
2500 if (!capable(CAP_SYS_ADMIN))
2501 return -EPERM;
34dc77ad
JK
2502 if (f2fs_readonly(sbi->sb))
2503 return -EROFS;
2504
34178b1b
CY
2505 end = range->start + range->len;
2506 if (end < range->start || range->start < MAIN_BLKADDR(sbi) ||
fbbf7799 2507 end >= MAX_BLKADDR(sbi))
b82f6e34 2508 return -EINVAL;
b82f6e34 2509
34dc77ad
JK
2510 ret = mnt_want_write_file(filp);
2511 if (ret)
2512 return ret;
2513
34dc77ad 2514do_more:
34178b1b 2515 if (!range->sync) {
fb24fea7 2516 if (!down_write_trylock(&sbi->gc_lock)) {
34dc77ad
JK
2517 ret = -EBUSY;
2518 goto out;
2519 }
2520 } else {
fb24fea7 2521 down_write(&sbi->gc_lock);
34dc77ad
JK
2522 }
2523
34178b1b 2524 ret = f2fs_gc(sbi, range->sync, true, GET_SEGNO(sbi, range->start));
97767500
QZ
2525 if (ret) {
2526 if (ret == -EBUSY)
2527 ret = -EAGAIN;
2528 goto out;
2529 }
34178b1b
CY
2530 range->start += BLKS_PER_SEC(sbi);
2531 if (range->start <= end)
34dc77ad
JK
2532 goto do_more;
2533out:
2534 mnt_drop_write_file(filp);
2535 return ret;
2536}
2537
34178b1b
CY
/* F2FS_IOC_GARBAGE_COLLECT_RANGE: copy the range in, then do the work. */
static int f2fs_ioc_gc_range(struct file *filp, unsigned long arg)
{
	struct f2fs_gc_range range;

	if (copy_from_user(&range, (struct f2fs_gc_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_gc_range(filp, &range);
}
2547
059c0648 2548static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
456b88e4
CY
2549{
2550 struct inode *inode = file_inode(filp);
2551 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
7fb17fe4 2552 int ret;
456b88e4
CY
2553
2554 if (!capable(CAP_SYS_ADMIN))
2555 return -EPERM;
2556
2557 if (f2fs_readonly(sbi->sb))
2558 return -EROFS;
2559
4354994f 2560 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
dcbb4c10 2561 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
4354994f
DR
2562 return -EINVAL;
2563 }
2564
7fb17fe4
CY
2565 ret = mnt_want_write_file(filp);
2566 if (ret)
2567 return ret;
2568
2569 ret = f2fs_sync_fs(sbi->sb, 1);
2570
2571 mnt_drop_write_file(filp);
2572 return ret;
456b88e4
CY
2573}
2574
d323d005
CY
/*
 * Defragment the byte range described by *range: detect fragmentation by
 * walking the block mapping, then redirty the affected pages so writeback
 * reallocates them contiguously.  On success range->len is updated to the
 * number of bytes actually queued for movement.
 */
static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
					struct file *filp,
					struct f2fs_defragment *range)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_map_blocks map = { .m_next_extent = NULL,
					.m_seg_type = NO_CHECK_TYPE ,
					.m_may_create = false };
	struct extent_info ei = {0, 0, 0};
	pgoff_t pg_start, pg_end, next_pgofs;
	unsigned int blk_per_seg = sbi->blocks_per_seg;
	unsigned int total = 0, sec_num;
	block_t blk_end = 0;
	bool fragmented = false;
	int err;

	/* if in-place-update policy is enabled, don't waste time here */
	if (f2fs_should_update_inplace(inode, NULL))
		return -EINVAL;

	pg_start = range->start >> PAGE_SHIFT;
	pg_end = (range->start + range->len) >> PAGE_SHIFT;

	f2fs_balance_fs(sbi, true);

	inode_lock(inode);

	/* writeback all dirty pages in the range */
	err = filemap_write_and_wait_range(inode->i_mapping, range->start,
						range->start + range->len - 1);
	if (err)
		goto out;

	/*
	 * lookup mapping info in extent cache, skip defragmenting if physical
	 * block addresses are continuous.
	 */
	if (f2fs_lookup_extent_cache(inode, pg_start, &ei)) {
		if (ei.fofs + ei.len >= pg_end)
			goto out;
	}

	map.m_lblk = pg_start;
	map.m_next_pgofs = &next_pgofs;

	/*
	 * lookup mapping info in dnode page cache, skip defragmenting if all
	 * physical block addresses are continuous even if there are hole(s)
	 * in logical blocks.
	 */
	while (map.m_lblk < pg_end) {
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			/* hole: jump to the next mapped offset */
			map.m_lblk = next_pgofs;
			continue;
		}

		/* discontiguous physical blocks => fragmented */
		if (blk_end && blk_end != map.m_pblk)
			fragmented = true;

		/* record total count of block that we're going to move */
		total += map.m_len;

		blk_end = map.m_pblk + map.m_len;

		map.m_lblk += map.m_len;
	}

	if (!fragmented) {
		total = 0;
		goto out;
	}

	sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));

	/*
	 * make sure there are enough free section for LFS allocation, this can
	 * avoid defragment running in SSR mode when free section are allocated
	 * intensively
	 */
	if (has_not_enough_free_secs(sbi, 0, sec_num)) {
		err = -EAGAIN;
		goto out;
	}

	map.m_lblk = pg_start;
	map.m_len = pg_end - pg_start;
	total = 0;

	/* second pass: dirty the pages, at most one segment per writeback */
	while (map.m_lblk < pg_end) {
		pgoff_t idx;
		int cnt = 0;

do_map:
		map.m_len = pg_end - map.m_lblk;
		err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_DEFAULT);
		if (err)
			goto clear_out;

		if (!(map.m_flags & F2FS_MAP_FLAGS)) {
			map.m_lblk = next_pgofs;
			goto check;
		}

		set_inode_flag(inode, FI_DO_DEFRAG);

		idx = map.m_lblk;
		while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
			struct page *page;

			page = f2fs_get_lock_data_page(inode, idx, true);
			if (IS_ERR(page)) {
				err = PTR_ERR(page);
				goto clear_out;
			}

			set_page_dirty(page);
			f2fs_put_page(page, 1);

			idx++;
			cnt++;
			total++;
		}

		map.m_lblk = idx;
check:
		if (map.m_lblk < pg_end && cnt < blk_per_seg)
			goto do_map;

		clear_inode_flag(inode, FI_DO_DEFRAG);

		/* flush this segment's worth of pages before continuing */
		err = filemap_fdatawrite(inode->i_mapping);
		if (err)
			goto out;
	}
clear_out:
	clear_inode_flag(inode, FI_DO_DEFRAG);
out:
	inode_unlock(inode);
	if (!err)
		range->len = (u64)total << PAGE_SHIFT;
	return err;
}
2722
/*
 * F2FS_IOC_DEFRAGMENT: validate the userspace request, run
 * f2fs_defragment_range(), and copy the updated range back out.
 */
static int f2fs_ioc_defragment(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_defragment range;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!S_ISREG(inode->i_mode) || f2fs_is_atomic_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&range, (struct f2fs_defragment __user *)arg,
							sizeof(range)))
		return -EFAULT;

	/* verify alignment of offset & size */
	if (range.start & (F2FS_BLKSIZE - 1) || range.len & (F2FS_BLKSIZE - 1))
		return -EINVAL;

	/* range must stay within the maximum file size */
	if (unlikely((range.start + range.len) >> PAGE_SHIFT >
					max_file_blocks(inode)))
		return -EINVAL;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	err = f2fs_defragment_range(sbi, filp, &range);
	mnt_drop_write_file(filp);

	f2fs_update_time(sbi, REQ_TIME);
	if (err < 0)
		return err;

	/* report back how many bytes were actually moved */
	if (copy_to_user((struct f2fs_defragment __user *)arg, &range,
							sizeof(range)))
		return -EFAULT;

	return 0;
}
2768
4dd6f977
JK
/*
 * Exchange block mappings between two files (backend of
 * F2FS_IOC_MOVE_RANGE).  Both files must be regular, unencrypted, on the
 * same f2fs mount, and the range must be block-aligned.
 */
static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
			struct file *file_out, loff_t pos_out, size_t len)
{
	struct inode *src = file_inode(file_in);
	struct inode *dst = file_inode(file_out);
	struct f2fs_sb_info *sbi = F2FS_I_SB(src);
	size_t olen = len, dst_max_i_size = 0;
	size_t dst_osize;
	int ret;

	if (file_in->f_path.mnt != file_out->f_path.mnt ||
				src->i_sb != dst->i_sb)
		return -EXDEV;

	if (unlikely(f2fs_readonly(src->i_sb)))
		return -EROFS;

	if (!S_ISREG(src->i_mode) || !S_ISREG(dst->i_mode))
		return -EINVAL;

	if (IS_ENCRYPTED(src) || IS_ENCRYPTED(dst))
		return -EOPNOTSUPP;

	if (pos_out < 0 || pos_in < 0)
		return -EINVAL;

	/* same file: no-op if identical offsets, reject overlap */
	if (src == dst) {
		if (pos_in == pos_out)
			return 0;
		if (pos_out > pos_in && pos_out < pos_in + len)
			return -EINVAL;
	}

	/* lock src first; dst with trylock to avoid ABBA deadlock */
	inode_lock(src);
	if (src != dst) {
		ret = -EBUSY;
		if (!inode_trylock(dst))
			goto out;
	}

	ret = -EINVAL;
	if (pos_in + len > src->i_size || pos_in + len < pos_in)
		goto out_unlock;
	/* len == 0 means "to end of source file" */
	if (len == 0)
		olen = len = src->i_size - pos_in;
	if (pos_in + len == src->i_size)
		len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
	if (len == 0) {
		ret = 0;
		goto out_unlock;
	}

	dst_osize = dst->i_size;
	if (pos_out + olen > dst->i_size)
		dst_max_i_size = pos_out + olen;

	/* verify the end result is block aligned */
	if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
			!IS_ALIGNED(pos_out, F2FS_BLKSIZE))
		goto out_unlock;

	ret = f2fs_convert_inline_inode(src);
	if (ret)
		goto out_unlock;

	ret = f2fs_convert_inline_inode(dst);
	if (ret)
		goto out_unlock;

	/* write out all dirty pages from offset */
	ret = filemap_write_and_wait_range(src->i_mapping,
					pos_in, pos_in + len);
	if (ret)
		goto out_unlock;

	ret = filemap_write_and_wait_range(dst->i_mapping,
					pos_out, pos_out + len);
	if (ret)
		goto out_unlock;

	f2fs_balance_fs(sbi, true);

	/* exclude GC on both inodes while exchanging blocks */
	down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
	if (src != dst) {
		ret = -EBUSY;
		if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
			goto out_src;
	}

	f2fs_lock_op(sbi);
	ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
				pos_out >> F2FS_BLKSIZE_BITS,
				len >> F2FS_BLKSIZE_BITS, false);

	if (!ret) {
		/* grow or restore dst's i_size to reflect the move */
		if (dst_max_i_size)
			f2fs_i_size_write(dst, dst_max_i_size);
		else if (dst_osize != dst->i_size)
			f2fs_i_size_write(dst, dst_osize);
	}
	f2fs_unlock_op(sbi);

	if (src != dst)
		up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
out_src:
	up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
out_unlock:
	if (src != dst)
		inode_unlock(dst);
out:
	inode_unlock(src);
	return ret;
}
2883
34178b1b
CY
/*
 * Resolve the destination fd and access modes for F2FS_IOC_MOVE_RANGE,
 * then hand off to f2fs_move_file_range().
 */
static int __f2fs_ioc_move_range(struct file *filp,
				struct f2fs_move_range *range)
{
	struct fd dst;
	int err;

	/* source must be open read-write */
	if (!(filp->f_mode & FMODE_READ) ||
			!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	dst = fdget(range->dst_fd);
	if (!dst.file)
		return -EBADF;

	if (!(dst.file->f_mode & FMODE_WRITE)) {
		err = -EBADF;
		goto err_out;
	}

	err = mnt_want_write_file(filp);
	if (err)
		goto err_out;

	err = f2fs_move_file_range(filp, range->pos_in, dst.file,
					range->pos_out, range->len);

	mnt_drop_write_file(filp);
err_out:
	fdput(dst);
	return err;
}
2915
34178b1b
CY
/* F2FS_IOC_MOVE_RANGE: copy the request in, then do the work. */
static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
{
	struct f2fs_move_range range;

	if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
							sizeof(range)))
		return -EFAULT;
	return __f2fs_ioc_move_range(filp, &range);
}
2925
e066b83c
JK
/*
 * F2FS_IOC_FLUSH_DEVICE: migrate data off one device of a multi-device
 * filesystem by garbage-collecting a window of its segments.  Only valid
 * for multi-device setups without large sections.
 */
static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct sit_info *sm = SIT_I(sbi);
	unsigned int start_segno = 0, end_segno = 0;
	unsigned int dev_start_segno = 0, dev_end_segno = 0;
	struct f2fs_flush_device range;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
		return -EINVAL;

	if (copy_from_user(&range, (struct f2fs_flush_device __user *)arg,
							sizeof(range)))
		return -EFAULT;

	if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
			__is_large_section(sbi)) {
		f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
			  range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
		return -EINVAL;
	}

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	/* compute the segment window belonging to the requested device */
	if (range.dev_num != 0)
		dev_start_segno = GET_SEGNO(sbi, FDEV(range.dev_num).start_blk);
	dev_end_segno = GET_SEGNO(sbi, FDEV(range.dev_num).end_blk);

	/* resume from the last flush position if it is still in range */
	start_segno = sm->last_victim[FLUSH_DEVICE];
	if (start_segno < dev_start_segno || start_segno >= dev_end_segno)
		start_segno = dev_start_segno;
	end_segno = min(start_segno + range.segments, dev_end_segno);

	while (start_segno < end_segno) {
		if (!down_write_trylock(&sbi->gc_lock)) {
			ret = -EBUSY;
			goto out;
		}
		/* steer victim selection away from the window being flushed */
		sm->last_victim[GC_CB] = end_segno + 1;
		sm->last_victim[GC_GREEDY] = end_segno + 1;
		sm->last_victim[ALLOC_NEXT] = end_segno + 1;
		ret = f2fs_gc(sbi, true, true, start_segno);
		if (ret == -EAGAIN)
			ret = 0;
		else if (ret < 0)
			break;
		start_segno++;
	}
out:
	mnt_drop_write_file(filp);
	return ret;
}
2988
e65ef207
JK
/*
 * F2FS_IOC_GET_FEATURES: report the superblock feature mask, with the
 * atomic-write feature forced on for userspace compatibility.
 */
static int f2fs_ioc_get_features(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	u32 sb_feature = le32_to_cpu(F2FS_I_SB(inode)->raw_super->feature);

	/* Must validate to set it with SQLite behavior in Android. */
	sb_feature |= F2FS_FEATURE_ATOMIC_WRITE;

	return put_user(sb_feature, (u32 __user *)arg);
}
e066b83c 2999
2c1d0305 3000#ifdef CONFIG_QUOTA
78130819
CY
/*
 * Charge the inode's usage to the given project quota ID.  On transfer
 * failure the quota is flagged as needing repair.
 */
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	struct dquot *transfer_to[MAXQUOTAS] = {};
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct super_block *sb = sbi->sb;
	int err = 0;

	transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
	if (!IS_ERR(transfer_to[PRJQUOTA])) {
		err = __dquot_transfer(inode, transfer_to);
		if (err)
			set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
		dqput(transfer_to[PRJQUOTA]);
	}
	return err;
}
3017
2c1d0305
CY
/*
 * Set the inode's project ID (FS_IOC_FSSETXATTR path).  Requires the
 * project-quota feature and the extra-attr inode space for i_projid.
 */
static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *ipage;
	kprojid_t kprojid;
	int err;

	if (!f2fs_sb_has_project_quota(sbi)) {
		/* without the feature only the default project ID is allowed */
		if (projid != F2FS_DEF_PROJID)
			return -EOPNOTSUPP;
		else
			return 0;
	}

	if (!f2fs_has_extra_attr(inode))
		return -EOPNOTSUPP;

	kprojid = make_kprojid(&init_user_ns, (projid_t)projid);

	if (projid_eq(kprojid, F2FS_I(inode)->i_projid))
		return 0;

	err = -EPERM;
	/* Is it quota file? Do not allow user to mess with it */
	if (IS_NOQUOTA(inode))
		return err;

	/* verify the on-disk inode can actually hold i_projid */
	ipage = f2fs_get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	if (!F2FS_FITS_IN_INODE(F2FS_INODE(ipage), fi->i_extra_isize,
								i_projid)) {
		err = -EOVERFLOW;
		f2fs_put_page(ipage, 1);
		return err;
	}
	f2fs_put_page(ipage, 1);

	err = dquot_initialize(inode);
	if (err)
		return err;

	f2fs_lock_op(sbi);
	err = f2fs_transfer_project_quota(inode, kprojid);
	if (err)
		goto out_unlock;

	F2FS_I(inode)->i_projid = kprojid;
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);
out_unlock:
	f2fs_unlock_op(sbi);
	return err;
}
3075#else
/* !CONFIG_QUOTA: project quota transfer is a no-op. */
int f2fs_transfer_project_quota(struct inode *inode, kprojid_t kprojid)
{
	return 0;
}
3080
2c1d0305
CY
3081static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
3082{
3083 if (projid != F2FS_DEF_PROJID)
3084 return -EOPNOTSUPP;
3085 return 0;
3086}
3087#endif
3088
36098557
EB
3089/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
3090
3091/*
3092 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
3093 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
3094 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
3095 */
3096
/*
 * Table mapping f2fs on-disk inode flags (F2FS_*_FL) to the generic
 * FS_XFLAG_* values used by FS_IOC_FSGETXATTR/FS_IOC_FSSETXATTR.
 */
static const struct {
	u32 iflag;
	u32 xflag;
} f2fs_xflags_map[] = {
	{ F2FS_SYNC_FL,		FS_XFLAG_SYNC },
	{ F2FS_IMMUTABLE_FL,	FS_XFLAG_IMMUTABLE },
	{ F2FS_APPEND_FL,	FS_XFLAG_APPEND },
	{ F2FS_NODUMP_FL,	FS_XFLAG_NODUMP },
	{ F2FS_NOATIME_FL,	FS_XFLAG_NOATIME },
	{ F2FS_PROJINHERIT_FL,	FS_XFLAG_PROJINHERIT },
};

/* The set of xflags f2fs accepts from userspace (union of map entries). */
#define F2FS_SUPPORTED_XFLAGS (		\
		FS_XFLAG_SYNC |		\
		FS_XFLAG_IMMUTABLE |	\
		FS_XFLAG_APPEND |	\
		FS_XFLAG_NODUMP |	\
		FS_XFLAG_NOATIME |	\
		FS_XFLAG_PROJINHERIT)
3116
3117/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
3118static inline u32 f2fs_iflags_to_xflags(u32 iflags)
3119{
3120 u32 xflags = 0;
3121 int i;
3122
3123 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3124 if (iflags & f2fs_xflags_map[i].iflag)
3125 xflags |= f2fs_xflags_map[i].xflag;
3126
2c1d0305
CY
3127 return xflags;
3128}
3129
36098557
EB
3130/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
3131static inline u32 f2fs_xflags_to_iflags(u32 xflags)
2c1d0305 3132{
36098557
EB
3133 u32 iflags = 0;
3134 int i;
2c1d0305 3135
36098557
EB
3136 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
3137 if (xflags & f2fs_xflags_map[i].xflag)
3138 iflags |= f2fs_xflags_map[i].iflag;
2c1d0305
CY
3139
3140 return iflags;
3141}
3142
6fc93c4e 3143static void f2fs_fill_fsxattr(struct inode *inode, struct fsxattr *fa)
2c1d0305 3144{
2c1d0305 3145 struct f2fs_inode_info *fi = F2FS_I(inode);
2c1d0305 3146
6fc93c4e 3147 simple_fill_fsxattr(fa, f2fs_iflags_to_xflags(fi->i_flags));
2c1d0305 3148
7beb01f7 3149 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
6fc93c4e 3150 fa->fsx_projid = from_kprojid(&init_user_ns, fi->i_projid);
2c1d0305
CY
3151}
3152
6fc93c4e 3153static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
c8e92757 3154{
6fc93c4e
EB
3155 struct inode *inode = file_inode(filp);
3156 struct fsxattr fa;
c8e92757 3157
6fc93c4e 3158 f2fs_fill_fsxattr(inode, &fa);
c8e92757 3159
6fc93c4e
EB
3160 if (copy_to_user((struct fsxattr __user *)arg, &fa, sizeof(fa)))
3161 return -EFAULT;
c8e92757
WS
3162 return 0;
3163}
3164
/*
 * FS_IOC_FSSETXATTR: update the inode's xflags and project id.
 *
 * Only flags in F2FS_SUPPORTED_XFLAGS are accepted; the VFS helper
 * validates the old->new transition before anything is changed.  Flag
 * and project-id updates happen under inode_lock with a write ref held.
 */
static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct fsxattr fa, old_fa;
	u32 iflags;
	int err;

	if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
		return -EFAULT;

	/* Make sure caller has proper permission */
	if (!inode_owner_or_capable(inode))
		return -EACCES;

	if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
		return -EOPNOTSUPP;

	iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
	/* reject flags that this file type cannot carry */
	if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
		return -EOPNOTSUPP;

	err = mnt_want_write_file(filp);
	if (err)
		return err;

	inode_lock(inode);

	/* let the VFS validate the transition (immutable/append rules etc.) */
	f2fs_fill_fsxattr(inode, &old_fa);
	err = vfs_ioc_fssetxattr_check(inode, &old_fa, &fa);
	if (err)
		goto out;

	err = f2fs_setflags_common(inode, iflags,
			f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
	if (err)
		goto out;

	err = f2fs_ioc_setproject(filp, fa.fsx_projid);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return err;
}
e066b83c 3208
1ad71a27
JK
3209int f2fs_pin_file_control(struct inode *inode, bool inc)
3210{
3211 struct f2fs_inode_info *fi = F2FS_I(inode);
3212 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3213
3214 /* Use i_gc_failures for normal file as a risk signal. */
3215 if (inc)
2ef79ecb
CY
3216 f2fs_i_gc_failures_write(inode,
3217 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
1ad71a27 3218
2ef79ecb 3219 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
dcbb4c10
JP
3220 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
3221 __func__, inode->i_ino,
3222 fi->i_gc_failures[GC_FAILURE_PIN]);
1ad71a27
JK
3223 clear_inode_flag(inode, FI_PIN_FILE);
3224 return -EAGAIN;
3225 }
3226 return 0;
3227}
3228
/*
 * F2FS_IOC_SET_PIN_FILE: pin (pin != 0) or unpin a regular file so GC
 * will not migrate its blocks.  Pinning requires in-place-update mode,
 * no inline data, and no compression; returns the current pin-failure
 * count on success.
 */
static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	__u32 pin;
	int ret = 0;

	if (get_user(pin, (__u32 __user *)arg))
		return -EFAULT;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (f2fs_readonly(F2FS_I_SB(inode)->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	inode_lock(inode);

	/* pinning only makes sense for in-place-update files */
	if (f2fs_should_update_outplace(inode, NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (!pin) {
		/* unpin: drop the flag and reset the failure counter */
		clear_inode_flag(inode, FI_PIN_FILE);
		f2fs_i_gc_failures_write(inode, 0);
		goto done;
	}

	/* refuse to (re)pin a file already past the GC-failure threshold */
	if (f2fs_pin_file_control(inode, false)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto out;

	if (!f2fs_disable_compressed_file(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	set_inode_flag(inode, FI_PIN_FILE);
	ret = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
done:
	f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
	inode_unlock(inode);
	mnt_drop_write_file(filp);
	return ret;
}
3284
3285static int f2fs_ioc_get_pin_file(struct file *filp, unsigned long arg)
3286{
3287 struct inode *inode = file_inode(filp);
3288 __u32 pin = 0;
3289
3290 if (is_inode_flag_set(inode, FI_PIN_FILE))
2ef79ecb 3291 pin = F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN];
1ad71a27
JK
3292 return put_user(pin, (u32 __user *)arg);
3293}
3294
c4020b2d
CY
3295int f2fs_precache_extents(struct inode *inode)
3296{
3297 struct f2fs_inode_info *fi = F2FS_I(inode);
3298 struct f2fs_map_blocks map;
3299 pgoff_t m_next_extent;
3300 loff_t end;
3301 int err;
3302
3303 if (is_inode_flag_set(inode, FI_NO_EXTENT))
3304 return -EOPNOTSUPP;
3305
3306 map.m_lblk = 0;
3307 map.m_next_pgofs = NULL;
3308 map.m_next_extent = &m_next_extent;
3309 map.m_seg_type = NO_CHECK_TYPE;
f4f0b677 3310 map.m_may_create = false;
6d1451bf 3311 end = max_file_blocks(inode);
c4020b2d
CY
3312
3313 while (map.m_lblk < end) {
3314 map.m_len = end - map.m_lblk;
3315
b2532c69 3316 down_write(&fi->i_gc_rwsem[WRITE]);
c4020b2d 3317 err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE);
b2532c69 3318 up_write(&fi->i_gc_rwsem[WRITE]);
c4020b2d
CY
3319 if (err)
3320 return err;
3321
3322 map.m_lblk = m_next_extent;
3323 }
3324
3325 return err;
3326}
3327
/* F2FS_IOC_PRECACHE_EXTENTS: prefetch the file's extent info. */
static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
{
	return f2fs_precache_extents(file_inode(filp));
}
3332
/*
 * F2FS_IOC_RESIZE_FS: shrink/grow the filesystem to @block_count blocks.
 * Requires CAP_SYS_ADMIN and a writable filesystem; the errno order
 * (EPERM before EROFS before EFAULT) is part of the user contract.
 */
static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
	__u64 block_count;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	if (copy_from_user(&block_count, (void __user *)arg,
			   sizeof(block_count)))
		return -EFAULT;

	return f2fs_resize_fs(sbi, block_count);
}
3350
95ae251f
EB
3351static int f2fs_ioc_enable_verity(struct file *filp, unsigned long arg)
3352{
3353 struct inode *inode = file_inode(filp);
3354
3355 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3356
3357 if (!f2fs_sb_has_verity(F2FS_I_SB(inode))) {
3358 f2fs_warn(F2FS_I_SB(inode),
3359 "Can't enable fs-verity on inode %lu: the verity feature is not enabled on this filesystem.\n",
3360 inode->i_ino);
3361 return -EOPNOTSUPP;
3362 }
3363
3364 return fsverity_ioctl_enable(filp, (const void __user *)arg);
3365}
3366
/* FS_IOC_MEASURE_VERITY: return the file's fs-verity digest, if enabled. */
static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg)
{
	if (!f2fs_sb_has_verity(F2FS_I_SB(file_inode(filp))))
		return -EOPNOTSUPP;

	return fsverity_ioctl_measure(filp, (void __user *)arg);
}
3374
3357af8f 3375static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg)
4507847c
CY
3376{
3377 struct inode *inode = file_inode(filp);
3378 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3379 char *vbuf;
3380 int count;
3381 int err = 0;
3382
3383 vbuf = f2fs_kzalloc(sbi, MAX_VOLUME_NAME, GFP_KERNEL);
3384 if (!vbuf)
3385 return -ENOMEM;
3386
3387 down_read(&sbi->sb_lock);
3388 count = utf16s_to_utf8s(sbi->raw_super->volume_name,
3389 ARRAY_SIZE(sbi->raw_super->volume_name),
3390 UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME);
3391 up_read(&sbi->sb_lock);
3392
3393 if (copy_to_user((char __user *)arg, vbuf,
3394 min(FSLABEL_MAX, count)))
3395 err = -EFAULT;
3396
c8eb7024 3397 kfree(vbuf);
4507847c
CY
3398 return err;
3399}
3400
/*
 * FS_IOC_SETFSLABEL: set the volume name.  The label is copied from
 * userspace, re-encoded as UTF-16LE into the raw superblock under
 * sb_lock, and the superblock is committed synchronously.
 */
static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	char *vbuf;
	int err = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	vbuf = strndup_user((const char __user *)arg, FSLABEL_MAX);
	if (IS_ERR(vbuf))
		return PTR_ERR(vbuf);

	err = mnt_want_write_file(filp);
	if (err)
		goto out;

	down_write(&sbi->sb_lock);

	/* clear the old name fully so short labels don't leave residue */
	memset(sbi->raw_super->volume_name, 0,
			sizeof(sbi->raw_super->volume_name));
	utf8s_to_utf16s(vbuf, strlen(vbuf), UTF16_LITTLE_ENDIAN,
			sbi->raw_super->volume_name,
			ARRAY_SIZE(sbi->raw_super->volume_name));

	err = f2fs_commit_super(sbi, false);

	up_write(&sbi->sb_lock);

	mnt_drop_write_file(filp);
out:
	kfree(vbuf);
	return err;
}
3436
439dfb10
CY
3437static int f2fs_get_compress_blocks(struct file *filp, unsigned long arg)
3438{
3439 struct inode *inode = file_inode(filp);
3440 __u64 blocks;
3441
3442 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3443 return -EOPNOTSUPP;
3444
3445 if (!f2fs_compressed_file(inode))
3446 return -EINVAL;
3447
c2759eba 3448 blocks = atomic_read(&F2FS_I(inode)->i_compr_blocks);
439dfb10
CY
3449 return put_user(blocks, (u64 __user *)arg);
3450}
3451
/*
 * Release the space reserved for compression saving in @count block
 * addresses starting at dn->ofs_in_node.  @count is cluster-aligned.
 *
 * Returns the number of blocks released, or -EFSCORRUPTED if any block
 * address fails validation.
 */
static int release_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int released_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	/* first pass: validate every block address before mutating anything */
	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	/* second pass: walk cluster by cluster */
	while (count) {
		int compr_blocks = 0;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				/* skip clusters that are not compressed */
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr))
				compr_blocks++;

			if (blkaddr != NEW_ADDR)
				continue;

			/* drop the reserved (NEW_ADDR) slot */
			dn->data_blkaddr = NULL_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, false);
		dec_valid_block_count(sbi, dn->inode,
					cluster_size - compr_blocks);

		released_blocks += cluster_size - compr_blocks;
next:
		count -= cluster_size;
	}

	return released_blocks;
}
3505
/*
 * F2FS_IOC_RELEASE_COMPRESS_BLOCKS: give back the blocks saved by
 * compression to the free space pool.  The file is made immutable
 * first so its block map cannot change while (or after) the reserved
 * slots are dropped.  Returns the released count via *arg.
 */
static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	pgoff_t page_idx = 0, last_idx;
	unsigned int released_blocks = 0;
	int ret;
	int writecount;

	if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
		return -EOPNOTSUPP;

	if (!f2fs_compressed_file(inode))
		return -EINVAL;

	if (f2fs_readonly(sbi->sb))
		return -EROFS;

	ret = mnt_want_write_file(filp);
	if (ret)
		return ret;

	f2fs_balance_fs(F2FS_I_SB(inode), true);

	inode_lock(inode);

	/* no other writer may hold the file open */
	writecount = atomic_read(&inode->i_writecount);
	if ((filp->f_mode & FMODE_WRITE && writecount != 1) ||
			(!(filp->f_mode & FMODE_WRITE) && writecount)) {
		ret = -EBUSY;
		goto out;
	}

	if (IS_IMMUTABLE(inode)) {
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
	if (ret)
		goto out;

	/* freeze the file: released clusters must not be rewritten */
	F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL;
	f2fs_set_inode_flags(inode);
	inode->i_ctime = current_time(inode);
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!atomic_read(&F2FS_I(inode)->i_compr_blocks))
		goto out;

	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	while (page_idx < last_idx) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				/* hole: skip to the next allocated page */
				page_idx = f2fs_get_next_page_offset(&dn,
								page_idx);
				ret = 0;
				continue;
			}
			break;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
		/* always hand whole clusters to the helper */
		count = round_up(count, F2FS_I(inode)->i_cluster_size);

		ret = release_compress_blocks(&dn, count);

		f2fs_put_dnode(&dn);

		if (ret < 0)
			break;

		page_idx += count;
		released_blocks += ret;
	}

	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	up_write(&F2FS_I(inode)->i_mmap_sem);
out:
	inode_unlock(inode);

	mnt_drop_write_file(filp);

	if (ret >= 0) {
		ret = put_user(released_blocks, (u64 __user *)arg);
	} else if (released_blocks &&
			atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
		/* partial release leaves inconsistent counts: flag for fsck */
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
			"iblocks=%llu, released=%u, compr_blocks=%u, "
			"run fsck to fix.",
			__func__, inode->i_ino, inode->i_blocks,
			released_blocks,
			atomic_read(&F2FS_I(inode)->i_compr_blocks));
	}

	return ret;
}
3614
/*
 * Re-reserve the block slots previously released by
 * release_compress_blocks() for @count cluster-aligned addresses.
 *
 * Returns the number of blocks reserved, -ENOSPC if the quota grant was
 * partial, or -EFSCORRUPTED on invalid block addresses.
 */
static int reserve_compress_blocks(struct dnode_of_data *dn, pgoff_t count)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	unsigned int reserved_blocks = 0;
	int cluster_size = F2FS_I(dn->inode)->i_cluster_size;
	block_t blkaddr;
	int i;

	/* first pass: validate every block address before mutating anything */
	for (i = 0; i < count; i++) {
		blkaddr = data_blkaddr(dn->inode, dn->node_page,
						dn->ofs_in_node + i);

		if (!__is_valid_data_blkaddr(blkaddr))
			continue;
		if (unlikely(!f2fs_is_valid_blkaddr(sbi, blkaddr,
					DATA_GENERIC_ENHANCE)))
			return -EFSCORRUPTED;
	}

	/* second pass: walk cluster by cluster */
	while (count) {
		int compr_blocks = 0;
		blkcnt_t reserved;
		int ret;

		for (i = 0; i < cluster_size; i++, dn->ofs_in_node++) {
			blkaddr = f2fs_data_blkaddr(dn);

			if (i == 0) {
				/* skip clusters that are not compressed */
				if (blkaddr == COMPRESS_ADDR)
					continue;
				dn->ofs_in_node += cluster_size;
				goto next;
			}

			if (__is_valid_data_blkaddr(blkaddr)) {
				compr_blocks++;
				continue;
			}

			/* re-mark the slot as reserved */
			dn->data_blkaddr = NEW_ADDR;
			f2fs_set_data_blkaddr(dn);
		}

		reserved = cluster_size - compr_blocks;
		ret = inc_valid_block_count(sbi, dn->inode, &reserved);
		if (ret)
			return ret;

		/* a partial grant means the fs ran out of space mid-cluster */
		if (reserved != cluster_size - compr_blocks)
			return -ENOSPC;

		f2fs_i_compr_blocks_update(dn->inode, compr_blocks, true);

		reserved_blocks += reserved;
next:
		count -= cluster_size;
	}

	return reserved_blocks;
}
3675
3676static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg)
3677{
3678 struct inode *inode = file_inode(filp);
3679 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
3680 pgoff_t page_idx = 0, last_idx;
3681 unsigned int reserved_blocks = 0;
3682 int ret;
3683
3684 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3685 return -EOPNOTSUPP;
3686
3687 if (!f2fs_compressed_file(inode))
3688 return -EINVAL;
3689
3690 if (f2fs_readonly(sbi->sb))
3691 return -EROFS;
3692
3693 ret = mnt_want_write_file(filp);
3694 if (ret)
3695 return ret;
3696
c2759eba 3697 if (atomic_read(&F2FS_I(inode)->i_compr_blocks))
c75488fb
CY
3698 goto out;
3699
3700 f2fs_balance_fs(F2FS_I_SB(inode), true);
3701
3702 inode_lock(inode);
3703
3704 if (!IS_IMMUTABLE(inode)) {
3705 ret = -EINVAL;
3706 goto unlock_inode;
3707 }
3708
3709 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3710 down_write(&F2FS_I(inode)->i_mmap_sem);
3711
3712 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
3713
3714 while (page_idx < last_idx) {
3715 struct dnode_of_data dn;
3716 pgoff_t end_offset, count;
3717
3718 set_new_dnode(&dn, inode, NULL, NULL, 0);
3719 ret = f2fs_get_dnode_of_data(&dn, page_idx, LOOKUP_NODE);
3720 if (ret) {
3721 if (ret == -ENOENT) {
3722 page_idx = f2fs_get_next_page_offset(&dn,
3723 page_idx);
3724 ret = 0;
3725 continue;
3726 }
3727 break;
3728 }
3729
3730 end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
3731 count = min(end_offset - dn.ofs_in_node, last_idx - page_idx);
4fec3fc0 3732 count = round_up(count, F2FS_I(inode)->i_cluster_size);
c75488fb
CY
3733
3734 ret = reserve_compress_blocks(&dn, count);
3735
3736 f2fs_put_dnode(&dn);
3737
3738 if (ret < 0)
3739 break;
3740
3741 page_idx += count;
3742 reserved_blocks += ret;
3743 }
3744
3745 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
3746 up_write(&F2FS_I(inode)->i_mmap_sem);
3747
3748 if (ret >= 0) {
3749 F2FS_I(inode)->i_flags &= ~F2FS_IMMUTABLE_FL;
3750 f2fs_set_inode_flags(inode);
3751 inode->i_ctime = current_time(inode);
3752 f2fs_mark_inode_dirty_sync(inode, true);
3753 }
3754unlock_inode:
3755 inode_unlock(inode);
3756out:
3757 mnt_drop_write_file(filp);
3758
3759 if (ret >= 0) {
3760 ret = put_user(reserved_blocks, (u64 __user *)arg);
c2759eba
DJ
3761 } else if (reserved_blocks &&
3762 atomic_read(&F2FS_I(inode)->i_compr_blocks)) {
c75488fb
CY
3763 set_sbi_flag(sbi, SBI_NEED_FSCK);
3764 f2fs_warn(sbi, "%s: partial blocks were released i_ino=%lx "
c2759eba 3765 "iblocks=%llu, reserved=%u, compr_blocks=%u, "
c75488fb
CY
3766 "run fsck to fix.",
3767 __func__, inode->i_ino, inode->i_blocks,
3768 reserved_blocks,
c2759eba 3769 atomic_read(&F2FS_I(inode)->i_compr_blocks));
c75488fb
CY
3770 }
3771
3772 return ret;
3773}
3774
9af84648
DJ
3775static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode,
3776 pgoff_t off, block_t block, block_t len, u32 flags)
3777{
3778 struct request_queue *q = bdev_get_queue(bdev);
3779 sector_t sector = SECTOR_FROM_BLOCK(block);
3780 sector_t nr_sects = SECTOR_FROM_BLOCK(len);
3781 int ret = 0;
3782
3783 if (!q)
3784 return -ENXIO;
3785
3786 if (flags & F2FS_TRIM_FILE_DISCARD)
3787 ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS,
3788 blk_queue_secure_erase(q) ?
3789 BLKDEV_DISCARD_SECURE : 0);
3790
3791 if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) {
3792 if (IS_ENCRYPTED(inode))
3793 ret = fscrypt_zeroout_range(inode, off, block, len);
3794 else
3795 ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
3796 GFP_NOFS, 0);
3797 }
3798
3799 return ret;
3800}
3801
/*
 * F2FS_IOC_SEC_TRIM_FILE: physically discard and/or zero the on-disk
 * blocks backing a byte range of the file.  Contiguous runs on the same
 * device are batched into single f2fs_secure_erase() calls.
 *
 * range.start must be block-aligned; range.len == -1 means "to EOF".
 */
static int f2fs_sec_trim_file(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	struct block_device *prev_bdev = NULL;
	struct f2fs_sectrim_range range;
	pgoff_t index, pg_end, prev_index = 0;
	block_t prev_block = 0, len = 0;
	loff_t end_addr;
	bool to_end = false;
	int ret = 0;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg,
				sizeof(range)))
		return -EFAULT;

	if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) ||
			!S_ISREG(inode->i_mode))
		return -EINVAL;

	/* each requested mode must be supported by the hardware/config */
	if (((range.flags & F2FS_TRIM_FILE_DISCARD) &&
			!f2fs_hw_support_discard(sbi)) ||
			((range.flags & F2FS_TRIM_FILE_ZEROOUT) &&
			 IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi)))
		return -EOPNOTSUPP;

	file_start_write(filp);
	inode_lock(inode);

	if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) ||
			range.start >= inode->i_size) {
		ret = -EINVAL;
		goto err;
	}

	if (range.len == 0)
		goto err;

	if (inode->i_size - range.start > range.len) {
		end_addr = range.start + range.len;
	} else {
		/* len == -1 means trim to the end of the file */
		end_addr = range.len == (u64)-1 ?
			sbi->sb->s_maxbytes : inode->i_size;
		to_end = true;
	}

	if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) ||
			(!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) {
		ret = -EINVAL;
		goto err;
	}

	index = F2FS_BYTES_TO_BLK(range.start);
	pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE);

	ret = f2fs_convert_inline_inode(inode);
	if (ret)
		goto err;

	/* block GC and mmap faults while blocks are being erased */
	down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
	down_write(&F2FS_I(inode)->i_mmap_sem);

	ret = filemap_write_and_wait_range(mapping, range.start,
			to_end ? LLONG_MAX : end_addr - 1);
	if (ret)
		goto out;

	truncate_inode_pages_range(mapping, range.start,
			to_end ? -1 : end_addr - 1);

	while (index < pg_end) {
		struct dnode_of_data dn;
		pgoff_t end_offset, count;
		int i;

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
		if (ret) {
			if (ret == -ENOENT) {
				/* hole: skip to the next allocated page */
				index = f2fs_get_next_page_offset(&dn, index);
				continue;
			}
			goto out;
		}

		end_offset = ADDRS_PER_PAGE(dn.node_page, inode);
		count = min(end_offset - dn.ofs_in_node, pg_end - index);
		for (i = 0; i < count; i++, index++, dn.ofs_in_node++) {
			struct block_device *cur_bdev;
			block_t blkaddr = f2fs_data_blkaddr(&dn);

			if (!__is_valid_data_blkaddr(blkaddr))
				continue;

			if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
						DATA_GENERIC_ENHANCE)) {
				ret = -EFSCORRUPTED;
				f2fs_put_dnode(&dn);
				goto out;
			}

			cur_bdev = f2fs_target_device(sbi, blkaddr, NULL);
			if (f2fs_is_multi_device(sbi)) {
				int di = f2fs_target_device_index(sbi, blkaddr);

				/* convert to a device-relative address */
				blkaddr -= FDEV(di).start_blk;
			}

			if (len) {
				if (prev_bdev == cur_bdev &&
						index == prev_index + len &&
						blkaddr == prev_block + len) {
					/* extend the current contiguous run */
					len++;
				} else {
					/* run broken: erase what we batched */
					ret = f2fs_secure_erase(prev_bdev,
							inode, prev_index,
							prev_block, len,
							range.flags);
					if (ret) {
						f2fs_put_dnode(&dn);
						goto out;
					}

					len = 0;
				}
			}

			if (!len) {
				/* start a new run at this block */
				prev_bdev = cur_bdev;
				prev_index = index;
				prev_block = blkaddr;
				len = 1;
			}
		}

		f2fs_put_dnode(&dn);

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		cond_resched();
	}

	/* flush the final pending run */
	if (len)
		ret = f2fs_secure_erase(prev_bdev, inode, prev_index,
				prev_block, len, range.flags);
out:
	up_write(&F2FS_I(inode)->i_mmap_sem);
	up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
err:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
3961
9e2a5f8c 3962static int f2fs_ioc_get_compress_option(struct file *filp, unsigned long arg)
52656e6c 3963{
9e2a5f8c
DJ
3964 struct inode *inode = file_inode(filp);
3965 struct f2fs_comp_option option;
3966
3967 if (!f2fs_sb_has_compression(F2FS_I_SB(inode)))
3968 return -EOPNOTSUPP;
3969
3970 inode_lock_shared(inode);
3971
3972 if (!f2fs_compressed_file(inode)) {
3973 inode_unlock_shared(inode);
3974 return -ENODATA;
3975 }
3976
3977 option.algorithm = F2FS_I(inode)->i_compress_algorithm;
3978 option.log_cluster_size = F2FS_I(inode)->i_log_cluster_size;
3979
3980 inode_unlock_shared(inode);
3981
3982 if (copy_to_user((struct f2fs_comp_option __user *)arg, &option,
3983 sizeof(option)))
3984 return -EFAULT;
3985
3986 return 0;
3987}
3988
/*
 * F2FS_IOC_SET_COMPRESS_OPTION: change the compression algorithm and
 * cluster size of an (empty, unmapped) compressed file.
 */
static int f2fs_ioc_set_compress_option(struct file *filp, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_comp_option option;
	int ret = 0;

	if (!f2fs_sb_has_compression(sbi))
		return -EOPNOTSUPP;

	if (!(filp->f_mode & FMODE_WRITE))
		return -EBADF;

	if (copy_from_user(&option, (struct f2fs_comp_option __user *)arg,
				sizeof(option)))
		return -EFAULT;

	if (!f2fs_compressed_file(inode) ||
			option.log_cluster_size < MIN_COMPRESS_LOG_SIZE ||
			option.log_cluster_size > MAX_COMPRESS_LOG_SIZE ||
			option.algorithm >= COMPRESS_MAX)
		return -EINVAL;

	file_start_write(filp);
	inode_lock(inode);

	/* mmap'ed or dirty files may have data in the old cluster layout */
	if (f2fs_is_mmap_file(inode) || get_dirty_pages(inode)) {
		ret = -EBUSY;
		goto out;
	}

	/* only allowed before any data is written */
	if (inode->i_size != 0) {
		ret = -EFBIG;
		goto out;
	}

	F2FS_I(inode)->i_compress_algorithm = option.algorithm;
	F2FS_I(inode)->i_log_cluster_size = option.log_cluster_size;
	F2FS_I(inode)->i_cluster_size = 1 << option.log_cluster_size;
	f2fs_mark_inode_dirty_sync(inode, true);

	if (!f2fs_is_compress_backend_ready(inode))
		f2fs_warn(sbi, "compression algorithm is successfully set, "
				"but current kernel doesn't support this algorithm.");
out:
	inode_unlock(inode);
	file_end_write(filp);

	return ret;
}
4039
/*
 * Read @len pages starting at @page_idx into the page cache and mark
 * them dirty so writeback will re-run the compress/decompress path.
 */
static int redirty_blocks(struct inode *inode, pgoff_t page_idx, int len)
{
	DEFINE_READAHEAD(ractl, NULL, inode->i_mapping, page_idx);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	pgoff_t redirty_idx = page_idx;
	int i, page_len = 0, ret = 0;

	/* prime the page cache for the whole range in one go */
	page_cache_ra_unbounded(&ractl, len, 0);

	/* first pass: bring pages uptodate, counting how many succeeded */
	for (i = 0; i < len; i++, page_idx++) {
		page = read_cache_page(mapping, page_idx, NULL, NULL);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			break;
		}
		page_len++;
	}

	/* second pass: dirty only the pages the first pass pinned */
	for (i = 0; i < page_len; i++, redirty_idx++) {
		page = find_lock_page(mapping, redirty_idx);
		if (!page) {
			ret = -ENOMEM;
			break;
		}
		set_page_dirty(page);
		/* drop both refs: find_lock_page's and read_cache_page's */
		f2fs_put_page(page, 1);
		f2fs_put_page(page, 0);
	}

	return ret;
}
4072
4073static int f2fs_ioc_decompress_file(struct file *filp, unsigned long arg)
4074{
4075 struct inode *inode = file_inode(filp);
4076 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4077 struct f2fs_inode_info *fi = F2FS_I(inode);
4078 pgoff_t page_idx = 0, last_idx;
4079 unsigned int blk_per_seg = sbi->blocks_per_seg;
4080 int cluster_size = F2FS_I(inode)->i_cluster_size;
4081 int count, ret;
4082
4083 if (!f2fs_sb_has_compression(sbi) ||
4084 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4085 return -EOPNOTSUPP;
4086
4087 if (!(filp->f_mode & FMODE_WRITE))
4088 return -EBADF;
4089
4090 if (!f2fs_compressed_file(inode))
4091 return -EINVAL;
4092
4093 f2fs_balance_fs(F2FS_I_SB(inode), true);
4094
4095 file_start_write(filp);
4096 inode_lock(inode);
4097
4098 if (!f2fs_is_compress_backend_ready(inode)) {
4099 ret = -EOPNOTSUPP;
4100 goto out;
4101 }
4102
4103 if (f2fs_is_mmap_file(inode)) {
4104 ret = -EBUSY;
4105 goto out;
4106 }
4107
4108 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4109 if (ret)
4110 goto out;
4111
4112 if (!atomic_read(&fi->i_compr_blocks))
4113 goto out;
4114
4115 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4116
4117 count = last_idx - page_idx;
4118 while (count) {
4119 int len = min(cluster_size, count);
4120
4121 ret = redirty_blocks(inode, page_idx, len);
4122 if (ret < 0)
4123 break;
4124
4125 if (get_dirty_pages(inode) >= blk_per_seg)
4126 filemap_fdatawrite(inode->i_mapping);
4127
4128 count -= len;
4129 page_idx += len;
4130 }
4131
4132 if (!ret)
4133 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4134 LLONG_MAX);
4135
4136 if (ret)
4137 f2fs_warn(sbi, "%s: The file might be partially decompressed "
4138 "(errno=%d). Please delete the file.\n",
4139 __func__, ret);
4140out:
4141 inode_unlock(inode);
4142 file_end_write(filp);
4143
4144 return ret;
4145}
4146
4147static int f2fs_ioc_compress_file(struct file *filp, unsigned long arg)
4148{
4149 struct inode *inode = file_inode(filp);
4150 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
4151 pgoff_t page_idx = 0, last_idx;
4152 unsigned int blk_per_seg = sbi->blocks_per_seg;
4153 int cluster_size = F2FS_I(inode)->i_cluster_size;
4154 int count, ret;
4155
4156 if (!f2fs_sb_has_compression(sbi) ||
4157 F2FS_OPTION(sbi).compress_mode != COMPR_MODE_USER)
4158 return -EOPNOTSUPP;
4159
4160 if (!(filp->f_mode & FMODE_WRITE))
4161 return -EBADF;
4162
4163 if (!f2fs_compressed_file(inode))
4164 return -EINVAL;
4165
4166 f2fs_balance_fs(F2FS_I_SB(inode), true);
4167
4168 file_start_write(filp);
4169 inode_lock(inode);
4170
4171 if (!f2fs_is_compress_backend_ready(inode)) {
4172 ret = -EOPNOTSUPP;
4173 goto out;
4174 }
4175
4176 if (f2fs_is_mmap_file(inode)) {
4177 ret = -EBUSY;
4178 goto out;
4179 }
4180
4181 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
4182 if (ret)
4183 goto out;
4184
4185 set_inode_flag(inode, FI_ENABLE_COMPRESS);
4186
4187 last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
4188
4189 count = last_idx - page_idx;
4190 while (count) {
4191 int len = min(cluster_size, count);
4192
4193 ret = redirty_blocks(inode, page_idx, len);
4194 if (ret < 0)
4195 break;
4196
4197 if (get_dirty_pages(inode) >= blk_per_seg)
4198 filemap_fdatawrite(inode->i_mapping);
4199
4200 count -= len;
4201 page_idx += len;
4202 }
4203
4204 if (!ret)
4205 ret = filemap_write_and_wait_range(inode->i_mapping, 0,
4206 LLONG_MAX);
4207
4208 clear_inode_flag(inode, FI_ENABLE_COMPRESS);
4209
4210 if (ret)
4211 f2fs_warn(sbi, "%s: The file might be partially compressed "
4212 "(errno=%d). Please delete the file.\n",
4213 __func__, ret);
4214out:
4215 inode_unlock(inode);
4216 file_end_write(filp);
4217
4218 return ret;
4219}
4220
/*
 * Common ioctl dispatcher shared by the native and compat entry points.
 * Each command is forwarded verbatim to its handler; commands that take
 * no argument (atomic/volatile write control) drop @arg.  Unknown
 * commands get -ENOTTY per ioctl convention.
 */
static long __f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return f2fs_ioc_getflags(filp, arg);
	case FS_IOC_SETFLAGS:
		return f2fs_ioc_setflags(filp, arg);
	case FS_IOC_GETVERSION:
		return f2fs_ioc_getversion(filp, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
		return f2fs_ioc_start_atomic_write(filp);
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
		return f2fs_ioc_commit_atomic_write(filp);
	case F2FS_IOC_START_VOLATILE_WRITE:
		return f2fs_ioc_start_volatile_write(filp);
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
		return f2fs_ioc_release_volatile_write(filp);
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
		return f2fs_ioc_abort_volatile_write(filp);
	case F2FS_IOC_SHUTDOWN:
		return f2fs_ioc_shutdown(filp, arg);
	case FITRIM:
		return f2fs_ioc_fitrim(filp, arg);
	case FS_IOC_SET_ENCRYPTION_POLICY:
		return f2fs_ioc_set_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY:
		return f2fs_ioc_get_encryption_policy(filp, arg);
	case FS_IOC_GET_ENCRYPTION_PWSALT:
		return f2fs_ioc_get_encryption_pwsalt(filp, arg);
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
		return f2fs_ioc_get_encryption_policy_ex(filp, arg);
	case FS_IOC_ADD_ENCRYPTION_KEY:
		return f2fs_ioc_add_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
		return f2fs_ioc_remove_encryption_key(filp, arg);
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
		return f2fs_ioc_remove_encryption_key_all_users(filp, arg);
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
		return f2fs_ioc_get_encryption_key_status(filp, arg);
	case FS_IOC_GET_ENCRYPTION_NONCE:
		return f2fs_ioc_get_encryption_nonce(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT:
		return f2fs_ioc_gc(filp, arg);
	case F2FS_IOC_GARBAGE_COLLECT_RANGE:
		return f2fs_ioc_gc_range(filp, arg);
	case F2FS_IOC_WRITE_CHECKPOINT:
		return f2fs_ioc_write_checkpoint(filp, arg);
	case F2FS_IOC_DEFRAGMENT:
		return f2fs_ioc_defragment(filp, arg);
	case F2FS_IOC_MOVE_RANGE:
		return f2fs_ioc_move_range(filp, arg);
	case F2FS_IOC_FLUSH_DEVICE:
		return f2fs_ioc_flush_device(filp, arg);
	case F2FS_IOC_GET_FEATURES:
		return f2fs_ioc_get_features(filp, arg);
	case FS_IOC_FSGETXATTR:
		return f2fs_ioc_fsgetxattr(filp, arg);
	case FS_IOC_FSSETXATTR:
		return f2fs_ioc_fssetxattr(filp, arg);
	case F2FS_IOC_GET_PIN_FILE:
		return f2fs_ioc_get_pin_file(filp, arg);
	case F2FS_IOC_SET_PIN_FILE:
		return f2fs_ioc_set_pin_file(filp, arg);
	case F2FS_IOC_PRECACHE_EXTENTS:
		return f2fs_ioc_precache_extents(filp, arg);
	case F2FS_IOC_RESIZE_FS:
		return f2fs_ioc_resize_fs(filp, arg);
	case FS_IOC_ENABLE_VERITY:
		return f2fs_ioc_enable_verity(filp, arg);
	case FS_IOC_MEASURE_VERITY:
		return f2fs_ioc_measure_verity(filp, arg);
	case FS_IOC_GETFSLABEL:
		return f2fs_ioc_getfslabel(filp, arg);
	case FS_IOC_SETFSLABEL:
		return f2fs_ioc_setfslabel(filp, arg);
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
		return f2fs_get_compress_blocks(filp, arg);
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
		return f2fs_release_compress_blocks(filp, arg);
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
		return f2fs_reserve_compress_blocks(filp, arg);
	case F2FS_IOC_SEC_TRIM_FILE:
		return f2fs_sec_trim_file(filp, arg);
	case F2FS_IOC_GET_COMPRESS_OPTION:
		return f2fs_ioc_get_compress_option(filp, arg);
	case F2FS_IOC_SET_COMPRESS_OPTION:
		return f2fs_ioc_set_compress_option(filp, arg);
	case F2FS_IOC_DECOMPRESS_FILE:
		return f2fs_ioc_decompress_file(filp, arg);
	case F2FS_IOC_COMPRESS_FILE:
		return f2fs_ioc_compress_file(filp, arg);
	default:
		return -ENOTTY;
	}
}
4316
34178b1b
CY
4317long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4318{
4319 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
4320 return -EIO;
4321 if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(filp))))
4322 return -ENOSPC;
4323
4324 return __f2fs_ioctl(filp, cmd, arg);
4325}
4326
4c8ff709
CY
4327static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
4328{
4329 struct file *file = iocb->ki_filp;
4330 struct inode *inode = file_inode(file);
8b83ac81 4331 int ret;
4c8ff709
CY
4332
4333 if (!f2fs_is_compress_backend_ready(inode))
4334 return -EOPNOTSUPP;
4335
8b83ac81
CY
4336 ret = generic_file_read_iter(iocb, iter);
4337
4338 if (ret > 0)
4339 f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
4340
4341 return ret;
4c8ff709
CY
4342}
4343
/*
 * Write entry point.
 *
 * After the usual cp-error/compress-backend/immutable gates, the fast
 * path decision is about block preallocation: FI_NO_PREALLOC is set when
 * the user pages may fault (so allocation inside the write must be
 * avoided), IOCB_NOWAIT only proceeds for pure overwrites that need no
 * allocation, and direct I/O may skip preallocation when out-of-place
 * DIO is allowed.  If blocks were preallocated but the write fell short,
 * the excess is truncated away.  Bytes written are accounted to iostat
 * and generic_write_sync() handles O_SYNC semantics.
 */
static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	ssize_t ret;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
		ret = -EIO;
		goto out;
	}

	if (!f2fs_is_compress_backend_ready(inode)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* IOCB_NOWAIT must not sleep on the inode lock */
	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!inode_trylock(inode)) {
			ret = -EAGAIN;
			goto out;
		}
	} else {
		inode_lock(inode);
	}

	if (unlikely(IS_IMMUTABLE(inode))) {
		ret = -EPERM;
		goto unlock;
	}

	ret = generic_write_checks(iocb, from);
	if (ret > 0) {
		bool preallocated = false;
		size_t target_size = 0;
		int err;

		/*
		 * If the source pages may fault, don't allocate blocks
		 * inside the write path.
		 */
		if (iov_iter_fault_in_readable(from, iov_iter_count(from)))
			set_inode_flag(inode, FI_NO_PREALLOC);

		/* NOWAIT writes may only overwrite already-allocated data */
		if ((iocb->ki_flags & IOCB_NOWAIT)) {
			if (!f2fs_overwrite_io(inode, iocb->ki_pos,
						iov_iter_count(from)) ||
				f2fs_has_inline_data(inode) ||
				f2fs_force_buffered_io(inode, iocb, from)) {
				clear_inode_flag(inode, FI_NO_PREALLOC);
				inode_unlock(inode);
				ret = -EAGAIN;
				goto out;
			}
			goto write;
		}

		if (is_inode_flag_set(inode, FI_NO_PREALLOC))
			goto write;

		if (iocb->ki_flags & IOCB_DIRECT) {
			/*
			 * Convert inline data for Direct I/O before entering
			 * f2fs_direct_IO().
			 */
			err = f2fs_convert_inline_inode(inode);
			if (err)
				goto out_err;
			/*
			 * If force_buffered_io() is true, we have to allocate
			 * blocks all the time, since f2fs_direct_IO will fall
			 * back to buffered IO.
			 */
			if (!f2fs_force_buffered_io(inode, iocb, from) &&
					allow_outplace_dio(inode, iocb, from))
				goto write;
		}
		preallocated = true;
		target_size = iocb->ki_pos + iov_iter_count(from);

		err = f2fs_preallocate_blocks(iocb, from);
		if (err) {
out_err:
			clear_inode_flag(inode, FI_NO_PREALLOC);
			inode_unlock(inode);
			ret = err;
			goto out;
		}
write:
		ret = __generic_file_write_iter(iocb, from);
		clear_inode_flag(inode, FI_NO_PREALLOC);

		/* if we couldn't write data, we should deallocate blocks. */
		if (preallocated && i_size_read(inode) < target_size)
			f2fs_truncate(inode);

		if (ret > 0)
			f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
	}
unlock:
	inode_unlock(inode);
out:
	trace_f2fs_file_write_iter(inode, iocb->ki_pos,
					iov_iter_count(from), ret);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
4447
e9750824 4448#ifdef CONFIG_COMPAT
34178b1b
CY
4449struct compat_f2fs_gc_range {
4450 u32 sync;
4451 compat_u64 start;
4452 compat_u64 len;
4453};
4454#define F2FS_IOC32_GARBAGE_COLLECT_RANGE _IOW(F2FS_IOCTL_MAGIC, 11,\
4455 struct compat_f2fs_gc_range)
4456
4457static int f2fs_compat_ioc_gc_range(struct file *file, unsigned long arg)
4458{
4459 struct compat_f2fs_gc_range __user *urange;
4460 struct f2fs_gc_range range;
4461 int err;
4462
4463 urange = compat_ptr(arg);
4464 err = get_user(range.sync, &urange->sync);
4465 err |= get_user(range.start, &urange->start);
4466 err |= get_user(range.len, &urange->len);
4467 if (err)
4468 return -EFAULT;
4469
4470 return __f2fs_ioc_gc_range(file, &range);
4471}
4472
4473struct compat_f2fs_move_range {
4474 u32 dst_fd;
4475 compat_u64 pos_in;
4476 compat_u64 pos_out;
4477 compat_u64 len;
4478};
4479#define F2FS_IOC32_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
4480 struct compat_f2fs_move_range)
4481
4482static int f2fs_compat_ioc_move_range(struct file *file, unsigned long arg)
4483{
4484 struct compat_f2fs_move_range __user *urange;
4485 struct f2fs_move_range range;
4486 int err;
4487
4488 urange = compat_ptr(arg);
4489 err = get_user(range.dst_fd, &urange->dst_fd);
4490 err |= get_user(range.pos_in, &urange->pos_in);
4491 err |= get_user(range.pos_out, &urange->pos_out);
4492 err |= get_user(range.len, &urange->len);
4493 if (err)
4494 return -EFAULT;
4495
4496 return __f2fs_ioc_move_range(file, &range);
4497}
4498
/*
 * Compat (32-bit userspace) ioctl entry point.
 *
 * Commands whose structures have a different 32-bit layout get their own
 * translating handlers; FS_IOC32_* flag/version commands are remapped to
 * the native command numbers; everything else in the long case list is
 * layout-compatible and falls through to __f2fs_ioctl() with the pointer
 * argument converted via compat_ptr().
 */
long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(file)))))
		return -EIO;
	if (!f2fs_is_checkpoint_ready(F2FS_I_SB(file_inode(file))))
		return -ENOSPC;

	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case F2FS_IOC32_GARBAGE_COLLECT_RANGE:
		return f2fs_compat_ioc_gc_range(file, arg);
	case F2FS_IOC32_MOVE_RANGE:
		return f2fs_compat_ioc_move_range(file, arg);
	case F2FS_IOC_START_ATOMIC_WRITE:
	case F2FS_IOC_COMMIT_ATOMIC_WRITE:
	case F2FS_IOC_START_VOLATILE_WRITE:
	case F2FS_IOC_RELEASE_VOLATILE_WRITE:
	case F2FS_IOC_ABORT_VOLATILE_WRITE:
	case F2FS_IOC_SHUTDOWN:
	case FITRIM:
	case FS_IOC_SET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_PWSALT:
	case FS_IOC_GET_ENCRYPTION_POLICY:
	case FS_IOC_GET_ENCRYPTION_POLICY_EX:
	case FS_IOC_ADD_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY:
	case FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS:
	case FS_IOC_GET_ENCRYPTION_KEY_STATUS:
	case FS_IOC_GET_ENCRYPTION_NONCE:
	case F2FS_IOC_GARBAGE_COLLECT:
	case F2FS_IOC_WRITE_CHECKPOINT:
	case F2FS_IOC_DEFRAGMENT:
	case F2FS_IOC_FLUSH_DEVICE:
	case F2FS_IOC_GET_FEATURES:
	case FS_IOC_FSGETXATTR:
	case FS_IOC_FSSETXATTR:
	case F2FS_IOC_GET_PIN_FILE:
	case F2FS_IOC_SET_PIN_FILE:
	case F2FS_IOC_PRECACHE_EXTENTS:
	case F2FS_IOC_RESIZE_FS:
	case FS_IOC_ENABLE_VERITY:
	case FS_IOC_MEASURE_VERITY:
	case FS_IOC_GETFSLABEL:
	case FS_IOC_SETFSLABEL:
	case F2FS_IOC_GET_COMPRESS_BLOCKS:
	case F2FS_IOC_RELEASE_COMPRESS_BLOCKS:
	case F2FS_IOC_RESERVE_COMPRESS_BLOCKS:
	case F2FS_IOC_SEC_TRIM_FILE:
	case F2FS_IOC_GET_COMPRESS_OPTION:
	case F2FS_IOC_SET_COMPRESS_OPTION:
	case F2FS_IOC_DECOMPRESS_FILE:
	case F2FS_IOC_COMPRESS_FILE:
		break;
	default:
		return -ENOIOCTLCMD;
	}
	return __f2fs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
}
#endif
4566
/* VFS file operations for regular f2fs files. */
const struct file_operations f2fs_file_operations = {
	.llseek		= f2fs_llseek,
	.read_iter	= f2fs_file_read_iter,
	.write_iter	= f2fs_file_write_iter,
	.open		= f2fs_file_open,
	.release	= f2fs_release_file,
	.mmap		= f2fs_file_mmap,
	.flush		= f2fs_file_flush,
	.fsync		= f2fs_sync_file,
	.fallocate	= f2fs_fallocate,
	.unlocked_ioctl	= f2fs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= f2fs_compat_ioctl,
#endif
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};