/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
#include "xfs_iomap.h"
#include "xfs_reflink.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Clear the specified ranges to zero through either the pagecache or DAX.
 * Holes and unwritten extents will be left as-is as they already are zeroed.
 */
int
xfs_zero_range(
	struct xfs_inode	*ip,
	xfs_off_t		pos,
	xfs_off_t		count,
	bool			*did_zero)
{
	return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
}

int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}

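/*
 * Note: xfs_update_prealloc_flags() logs the flag change in its own
 * transaction. For example, xfs_file_fallocate() below builds a flags mask
 * (XFS_PREALLOC_SET for plain preallocation, plus XFS_PREALLOC_SYNC when
 * the file was opened O_DSYNC) and passes it here.
 */
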
/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	/*
	 * If we have an RT and/or log subvolume we need to make sure to flush
	 * the write cache of the device used for file data first.  This is to
	 * ensure newly written file data makes it to disk before logging the
	 * new inode size in case of an extending write.
	 */
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_blkdev_issue_flush(mp->m_rtdev_targp);
	else if (mp->m_logdev_targp != mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if (!log_flushed && !XFS_IS_REALTIME_INODE(ip) &&
	    mp->m_logdev_targp == mp->m_ddev_targp)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

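/*
 * Direct I/O reads: take the iolock shared so reads can run concurrently
 * with other reads and with non-overlapping direct writes, then hand the
 * actual I/O off to the iomap direct I/O code.
 */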
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	size_t			count = iov_iter_count(to);
	ssize_t			ret;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	file_accessed(iocb->ki_filp);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dio_rw(iocb, to, &xfs_iomap_ops, NULL);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

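/*
 * DAX reads bypass the page cache entirely; dax_iomap_rw() copies directly
 * from the backing store while we hold the iolock shared.
 */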
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_read_iter(iocb, to);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}

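/*
 * ->read_iter entry point: dispatch to the DAX, direct or buffered read
 * path. DAX is checked first so a DAX inode opened with O_DIRECT (which
 * also sets IOCB_DIRECT) still takes the DAX path.
 */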
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);
	return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock);
	if (error)
		return error;

	/*
	 * For changing security info in file_remove_privs() we need i_rwsem
	 * exclusively.
	 */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_ilock(ip, *iolock);
		goto restart;
	}
	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		bool	zero = false;

		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}

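/*
 * I/O completion handler for direct writes: update the in-core inode size
 * for extending writes, finish any copy-on-write remapping, and convert
 * unwritten extents, as described in the comments within.
 */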
static int
xfs_dio_write_end_io(
	struct kiocb		*iocb,
	ssize_t			size,
	unsigned		flags)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			offset = iocb->ki_pos;
	bool			update_size = false;
	int			error = 0;

	trace_xfs_end_io_direct_write(ip, offset, size);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (size <= 0)
		return size;

	/*
	 * We need to update the in-core inode size here so that we don't end up
	 * with the on-disk inode size being outside the in-core inode size. We
	 * have no other method of updating EOF for AIO, so always do it here
	 * if necessary.
	 *
	 * We need to lock the test/set EOF update as we can be racing with
	 * other IO completions here to update the EOF. Failing to serialise
	 * here can result in EOF moving backwards and Bad Things Happen when
	 * that occurs.
	 */
	spin_lock(&ip->i_flags_lock);
	if (offset + size > i_size_read(inode)) {
		i_size_write(inode, offset + size);
		update_size = true;
	}
	spin_unlock(&ip->i_flags_lock);

	if (flags & IOMAP_DIO_COW) {
		error = xfs_reflink_end_cow(ip, offset, size);
		if (error)
			return error;
	}

	if (flags & IOMAP_DIO_UNWRITTEN)
		error = xfs_iomap_write_unwritten(ip, offset, size);
	else if (update_size)
		error = xfs_setfilesize(ip, offset, size);

	return error;
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer. To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/*
	 * Don't take the exclusive iolock here unless the I/O is unaligned to
	 * the file system block size.  We don't need to consider the EOF
	 * extension case here because xfs_file_aio_write_checks() will relock
	 * the inode as necessary for EOF zeroing cases and fill out the new
	 * inode size as appropriate.
	 */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
		unaligned_io = 1;
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}

	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to take the exclusive lock
	 * for other reasons in xfs_file_aio_write_checks.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);

	/* If this is a block-aligned directio CoW, remap immediately. */
	if (xfs_is_reflink_inode(ip) && !unaligned_io) {
		ret = xfs_reflink_allocate_cow_range(ip, iocb->ki_pos, count);
		if (ret)
			goto out;
	}

	ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
out:
	xfs_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

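/*
 * DAX writes always take the iolock exclusive. dax_iomap_rw() completes
 * synchronously, so any inode size update can be done right here rather
 * than in an I/O completion callback.
 */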
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;
	size_t			count;
	loff_t			pos;

	xfs_ilock(ip, iolock);
	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);
	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}
out:
	xfs_iunlock(ip, iolock);
	return error ? error : ret;
}

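/*
 * Buffered writes take the iolock exclusive for the whole write. On EDQUOT
 * or ENOSPC the write is retried once after attempting to free quota and
 * EOF preallocations, as described in the comment in the retry logic below.
 */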
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock;

write_retry:
	iolock = XFS_IOLOCK_EXCL;
	xfs_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		xfs_iunlock(ip, iolock);
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
		enospc = xfs_inode_free_quota_cowblocks(ip);
		if (enospc)
			goto write_retry;
		iolock = 0;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);

		xfs_iunlock(ip, iolock);
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	if (iolock)
		xfs_iunlock(ip, iolock);
	return ret;
}

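/*
 * ->write_iter entry point: dispatch to the DAX, direct or buffered write
 * path and account the bytes written. Note the EREMCHG fallback below,
 * which is the one case where a direct write is redone as a buffered write.
 */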
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT) {
		/*
		 * Allow a directio write to fall back to a buffered
		 * write *only* in the case that we're doing a reflink
		 * CoW.  In all other directio scenarios we do not
		 * allow an operation to fall back to buffered mode.
		 */
		ret = xfs_file_dio_aio_write(iocb, from);
		if (ret == -EREMCHG)
			goto buffered;
	} else {
buffered:
		ret = xfs_file_buffered_aio_write(iocb, from);
	}

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}

#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE | FALLOC_FL_UNSHARE_RANGE)

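/*
 * fallocate() implementation. Each supported mode is handled in its own
 * branch: hole punching, range collapse/insert, zeroing, unsharing and
 * plain preallocation, with layout breaking, the PREALLOC flag update and
 * any size change handled in common at the end.
 */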
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {
			error = -EFBIG;
			goto out_unlock;
		}

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else {
			if (mode & FALLOC_FL_UNSHARE_RANGE) {
				error = xfs_reflink_unshare(ip, offset, len);
				if (error)
					goto out_unlock;
			}
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		}
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_vn_setattr_size(file_dentry(file), &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}

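/*
 * Both the clone and dedupe file_operations below are thin wrappers around
 * xfs_reflink_remap_range(); the final argument selects dedupe mode
 * (compare before sharing) versus an unconditional clone.
 */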
STATIC int
xfs_file_clone_range(
	struct file	*file_in,
	loff_t		pos_in,
	struct file	*file_out,
	loff_t		pos_out,
	u64		len)
{
	return xfs_reflink_remap_range(file_in, pos_in, file_out, pos_out,
				     len, false);
}

STATIC ssize_t
xfs_file_dedupe_range(
	struct file	*src_file,
	u64		loff,
	u64		len,
	struct file	*dst_file,
	u64		dst_loff)
{
	int		error;

	error = xfs_reflink_remap_range(src_file, loff, dst_file, dst_loff,
				     len, true);
	if (error)
		return error;
	return len;
}

STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		error = xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return error;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}

/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have the BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into the given range.  If we are searching
		 * holes and this is the first time through the loop, it means
		 * the given offset landed in a hole, so return it.
		 *
		 * If we have already stepped through some block buffers
		 * looking for holes but they all contained data, the last
		 * offset already points at the end of the last mapped page;
		 * if it has not reached the endpoint of the search, there
		 * should be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least we found one page.  If this is the first time we
		 * step into the loop, and if the first page index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of range.
			 * If the current offset has not reached the end of
			 * the specified search range, there should be a hole
			 * between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated (page->mapping ==
			 * NULL).  We can freely skip it and proceed to check
			 * the next page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the start
				 * point to search if this is the first time to
				 * come here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching for data and found nothing,
			 * or searching for a hole and found a data buffer.  In
			 * either case the next page probably contains what we
			 * want, so update the last offset to it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * The number of returned pages is less than we wanted, so the
		 * search is done.  In this case nothing was found when
		 * searching for data, but we found a hole behind the last
		 * offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

/*
 * caller must lock inode with xfs_ilock_data_map_shared,
 * can we craft an appropriate ASSERT?
 *
 * end is because the VFS-level lseek interface is defined such that any
 * offset past i_size shall return -ENXIO, but we use this for quota code
 * which does not maintain i_size, and we want to SEEK_DATA past i_size.
 */
loff_t
__xfs_seek_hole_data(
	struct inode		*inode,
	loff_t			start,
	loff_t			end,
	int			whence)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		lastbno;
	int			error;

	if (start >= end) {
		error = -ENXIO;
		goto out_error;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	lastbno = XFS_B_TO_FSB(mp, end);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_error;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_error;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
							&offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= end) {
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, end);

	return offset;

out_error:
	return error;
}

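/*
 * lseek(SEEK_HOLE/SEEK_DATA) entry point: clamp the search to i_size and
 * validate the resulting offset with vfs_setpos(), all while holding the
 * data map lock shared.
 */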
STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uint			lock;
	loff_t			offset, end;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	end = i_size_read(inode);
	offset = __xfs_seek_hole_data(inode, start, end, whence);
	if (offset < 0) {
		error = offset;
		goto out_unlock;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

/*
 * mmap()d file has taken write protection fault and is being made writable. We
 * can set the page state up correctly for a writable page, which means we can
 * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
 * mapping.
 */
STATIC int
xfs_filemap_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
	} else {
		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}

STATIC int
xfs_filemap_fault(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vma, vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode))
		ret = dax_iomap_fault(vma, vmf, &xfs_iomap_ops);
	else
		ret = filemap_fault(vma, vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	return ret;
}

/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults. There is no ->pmd_mkwrite callout for huge
 * pages, so we have a single function here to handle both cases. @flags
 * carries the information on the type of fault occurring.
 */
STATIC int
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	unsigned long		addr,
	pmd_t			*pmd,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = dax_iomap_pmd_fault(vma, addr, pmd, flags, &xfs_iomap_ops);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);

	return ret;
}

/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it's needed to serialise against
 * truncate similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
 * to ensure we serialise the fault barrier in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_pfn_mkwrite(vma, vmf);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;
}

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.pmd_fault	= xfs_filemap_pmd_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};

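/*
 * ->mmap: install the XFS vm_ops. For DAX mappings, VM_MIXEDMAP is set
 * because the mapped pfns may lack struct page backing, and VM_HUGEPAGE
 * opts the mapping into the PMD fault path above.
 */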
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.get_unmapped_area = thp_get_unmapped_area,
	.fallocate	= xfs_file_fallocate,
	.clone_file_range = xfs_file_clone_range,
	.dedupe_file_range = xfs_file_dedupe_range,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};