/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_iomap.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
	if (type & XFS_IOLOCK_EXCL)
		inode_lock(VFS_I(ip));

	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));

	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
/*
 * Clear the specified ranges to zero through either the pagecache or DAX.
 * Holes and unwritten extents will be left as-is as they already are zeroed.
 */
	return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
xfs_update_prealloc_flags(
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (XFS_FORCED_SHUTDOWN(mp))

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}
	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);
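/*
 * Illustrative, standalone userspace sketch (not part of this file): it shows
 * the fsync()/fdatasync() distinction the fsync path above is built around --
 * a pure in-place overwrite dirties only timestamps besides the data, so
 * fdatasync() may avoid a log force, while fsync() must always wait for the
 * inode metadata to hit the journal. The file name and payload are arbitrary
 * assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "hello, xfs";
	int fd = open("testfile", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Extending write: data *and* the new size must reach stable storage,
	 * so even fdatasync() has to force the log up to the inode's LSN. */
	if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		perror("pwrite");
	if (fdatasync(fd) < 0)
		perror("fdatasync");

	/* In-place overwrite of already-allocated blocks: only timestamps
	 * change besides the data, the case the single-device cache flush
	 * above exists for. */
	if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		perror("pwrite");
	if (fsync(fd) < 0)
		perror("fsync");

	close(fd);
	return 0;
}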
xfs_file_dio_aio_read(
	struct address_space	*mapping = iocb->ki_filp->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			isize = i_size_read(inode);
	size_t			count = iov_iter_count(to);
	struct iov_iter		data;
	struct xfs_buftarg	*target;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (XFS_IS_REALTIME_INODE(ip))
		target = ip->i_mount->m_rtdev_targp;
	else
		target = ip->i_mount->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask) {
		if (iocb->ki_pos == isize)

	file_accessed(iocb->ki_filp);
	/*
	 * Locking is a bit tricky here. If we take an exclusive lock for direct
	 * IO, we effectively serialise all new concurrent read IO to this file
	 * and block it behind IO that is currently in progress because IO in
	 * progress holds the IO lock shared. We only need to hold the lock
	 * exclusive to blow away the page cache, so only take the lock
	 * exclusively if the page cache needs invalidation. This allows the
	 * normal direct IO case of no page cache pages to proceed concurrently
	 * without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if (mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		/*
		 * The generic dio code only flushes the range of the particular
		 * I/O. Because we take an exclusive lock here, this whole
		 * sequence is considerably more expensive for us. This has a
		 * noticeable performance impact for any file with cached pages,
		 * even when outside of the range of the particular I/O.
		 *
		 * Hence, amortize the cost of the lock against a full file
		 * flush and reduce the chances of repeated iolock cycles going
		 * forward.
		 */
		if (mapping->nrpages) {
			ret = filemap_write_and_wait(mapping);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2(mapping);
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
			xfs_get_blocks_direct, NULL, NULL, 0);

	iov_iter_advance(to, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
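/*
 * Illustrative, standalone userspace sketch (not part of this file): a direct
 * IO read that satisfies the alignment rule checked above -- file offset,
 * length and buffer address must all be multiples of the device's logical
 * sector size, or the request is rejected before any locking is attempted.
 * The 4096-byte alignment and the command-line file name are assumptions.
 */
#define _GNU_SOURCE		/* O_DIRECT */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const size_t align = 4096;	/* assumed logical sector size */
	void *buf;
	ssize_t ret;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Sector-aligned buffer for a sector-aligned read at offset 0. */
	if (posix_memalign(&buf, align, align)) {
		fprintf(stderr, "posix_memalign failed\n");
		close(fd);
		return 1;
	}

	ret = pread(fd, buf, align, 0);
	if (ret < 0)
		perror("pread");
	else
		printf("read %zd bytes with O_DIRECT\n", ret);

	free(buf);
	close(fd);
	return 0;
}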
static noinline ssize_t
xfs_file_dax_read(
	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
	size_t			count = iov_iter_count(to);

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
xfs_file_buffered_aio_read(
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_read_iter(iocb, to);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	XFS_STATS_ADD(mp, xs_read_bytes, ret);
xfs_file_splice_read(
	struct pipe_inode_info	*pipe,
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);

	XFS_STATS_INC(ip->i_mount, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))

	trace_xfs_file_splice_read(ip, count, *ppos);

	/*
	 * DAX inodes cannot use the page cache for splice, so we have to push
	 * them through the VFS IO path. This means it goes through
	 * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
	 * cannot lock the splice operation at this level for DAX inodes.
	 */
	if (IS_DAX(VFS_I(ip))) {
		ret = default_file_splice_read(infilp, ppos, pipe, count,

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file. This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);
	return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held. Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
xfs_file_aio_write_checks(
	struct iov_iter		*from,
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

	error = generic_write_checks(iocb, from);

	error = xfs_break_layouts(inode, iolock, true);

	/* For changing security info in file_remove_privs() we need i_mutex */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_rw_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, *iolock);
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write. If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		spin_unlock(&ip->i_flags_lock);
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, *iolock);
			iov_iter_reexpand(from, count);
		}
		/*
		 * We now have an IO submission barrier in place, but
		 * AIO can do EOF updates during IO completion and hence
		 * we now need to wait for all of them to drain. Non-AIO
		 * DIO will have drained before we are given the
		 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
		 * no-op.
		 */
		inode_dio_wait(inode);

		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);

	spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above. Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root. This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky-to-
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer. To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
xfs_file_dio_aio_write(
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			unaligned_io = 0;
	size_t			count = iov_iter_count(from);
	struct iov_iter		data;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
	/* "unaligned" here means not aligned to a filesystem block */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask))

	/*
	 * We don't need to take an exclusive lock unless there is page cache
	 * that needs to be invalidated or unaligned IO is being executed. We
	 * don't need to consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);
	/*
	 * Recheck if there are cached pages that need invalidating after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);

	count = iov_iter_count(from);
	end = iocb->ki_pos + count - 1;
	/*
	 * See xfs_file_dio_aio_read() for why we do a full-file flush here.
	 */
	if (mapping->nrpages) {
		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);

		/*
		 * Invalidate whole pages. This can return an error if we fail
		 * to invalidate a page, but this should never happen on XFS.
		 * Warn if it does fail.
		 */
		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain;
	 * otherwise demote the lock if we had to flush cached pages.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}
	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);

	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
			xfs_get_blocks_direct, xfs_end_io_direct_write,
			NULL, DIO_ASYNC_EXTEND);

	/* see generic_file_direct_write() for why this is necessary */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
			iocb->ki_pos >> PAGE_SHIFT,

	iov_iter_advance(from, ret);

	xfs_rw_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
static noinline ssize_t
xfs_file_dax_write(
	struct iov_iter		*from)
{
	struct inode		*inode = iocb->ki_filp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;
	ssize_t			ret, error = 0;

	xfs_rw_ilock(ip, iolock);
	ret = xfs_file_aio_write_checks(iocb, from, &iolock);

	count = iov_iter_count(from);

	trace_xfs_file_dax_write(ip, count, pos);

	ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
		i_size_write(inode, iocb->ki_pos);
		error = xfs_setfilesize(ip, pos, ret);
	}

	xfs_rw_iunlock(ip, iolock);
	return error ? error : ret;
xfs_file_buffered_aio_write(
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			iolock = XFS_IOLOCK_EXCL;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
	if (likely(ret >= 0))

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
	}

	current->backing_dev_info = NULL;

	xfs_rw_iunlock(ip, iolock);
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

	/* Handle various SYNC-type writes */
	ret = generic_write_sync(iocb, ret);
#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE)
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, false);

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {

		/*
		 * There is no need to overlap the collapse range with EOF;
		 * in that case it is effectively a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {

		new_size = i_size_read(inode) - len;
		error = xfs_collapse_file_space(ip, offset, len);
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {

		flags |= XFS_PREALLOC_SET;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);

	/* Change file size if needed */
		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence lose access to
	 * the data that is contained within them.
	 */
		error = xfs_insert_file_space(ip, offset, len);

	xfs_iunlock(ip, iolock);
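/*
 * Illustrative, standalone userspace sketch (not part of this file): the
 * fallocate(2) modes accepted by XFS_FALLOC_FL_SUPPORTED and handled above.
 * PUNCH_HOLE must be combined with KEEP_SIZE, and COLLAPSE_RANGE wants a
 * block-aligned range that ends before EOF. The file name and the 64KiB/1MiB
 * sizes are arbitrary assumptions.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Preallocate 1MiB without changing i_size (the XFS_PREALLOC_SET path). */
	if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) < 0)
		perror("fallocate keep-size");

	/* Punch a hole over the first 64KiB; i_size is left alone. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, 64 * 1024) < 0)
		perror("fallocate punch-hole");

	/* Collapse a block-aligned 64KiB range; fails with EINVAL if the
	 * range is misaligned or reaches EOF, as checked above. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 64 * 1024) < 0)
		perror("fallocate collapse-range");

	close(fd);
	return 0;
}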
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
	struct xfs_inode	*ip = XFS_I(inode);

	error = xfs_file_open(inode, file);

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	struct inode	*inode,
	return xfs_release(XFS_I(inode));
	struct dir_context	*ctx)
{
	struct inode		*inode = file_inode(file);
	xfs_inode_t		*ip = XFS_I(inode);

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem. With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size. For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found. Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
xfs_lookup_buffer_offset(
	loff_t			lastoff = page_offset(page);
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag. Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have the BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)

			if (type == HOLE_OFF)

		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);
/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache. Map is used to figure out the end points of the range to
 * search in.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address. Otherwise, return false and keep
 * the offset argument unchanged.
 */
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_SHIFT;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
		/*
		 * No page mapped into the given range. If we are searching
		 * holes and this is the first time through the loop, the
		 * given offset landed in a hole, so return it.
		 *
		 * If we have already stepped through some block buffers to
		 * find holes but they all contained data, then the last
		 * offset has already been updated to point to the end of the
		 * last mapped page; if it does not reach the end of the
		 * search range, there should be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
		/*
		 * At least we found one page. If this is the first time
		 * through the loop and the first page's index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of range.
			 * If the current offset has not reached the end of
			 * the specified search range, there should be a hole
			 * between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {

			/*
			 * Page truncated or invalidated (page->mapping == NULL).
			 * We can freely skip it and proceed to check the next
			 * page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {

			if (!page_has_buffers(page)) {

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
				/*
				 * The found offset may be less than the start
				 * point of the search if this is the first
				 * time through.
				 */
				*offset = max_t(loff_t, startoff, b_offset);

			/*
			 * We were either searching for data and found nothing,
			 * or searching for a hole and found a data buffer. In
			 * either case, the next page probably contains what we
			 * want, so update the last offset to point at it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
		/*
		 * If the number of returned pages is less than we asked for,
		 * the search is done. In that case nothing was found for a
		 * data search, but for a hole search we found a hole behind
		 * the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

	pagevec_release(&pvec);
/*
 * The caller must lock the inode with xfs_ilock_data_map_shared();
 * can we craft an appropriate ASSERT?
 *
 * The end argument exists because the VFS-level lseek interface is defined
 * such that any offset past i_size shall return -ENXIO, but we use this for
 * the quota code, which does not maintain i_size, and we want to SEEK_DATA
 * past i_size.
 */
__xfs_seek_hole_data(
	struct inode		*inode,
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		lastbno;
	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	lastbno = XFS_B_TO_FSB(mp, end);

		struct xfs_bmbt_irec	map[2];

		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,

		/* No extents at given offset, must be beyond EOF */
		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {

			/*
			 * If we were looking for data, it's nowhere to be
			 * found.
			 */
			ASSERT(whence == SEEK_DATA);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);

			if (whence == SEEK_HOLE) {

			ASSERT(whence == SEEK_DATA);

	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * a page boundary for unwritten extents. We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, end);
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;

	if (XFS_FORCED_SHUTDOWN(mp))

	lock = xfs_ilock_data_map_shared(ip);

	end = i_size_read(inode);
	offset = __xfs_seek_hole_data(inode, start, end, whence);

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

	xfs_iunlock(ip, lock);

		return generic_file_llseek(file, offset, whence);

		return xfs_seek_hole_data(file, offset, whence);
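/*
 * Illustrative, standalone userspace sketch (not part of this file): walking a
 * sparse file with the SEEK_DATA/SEEK_HOLE interface served by the code above.
 * SEEK_DATA past the last extent (or past i_size) fails with ENXIO, and every
 * file has an implicit hole at EOF. The command-line file name is an
 * assumption.
 */
#define _GNU_SOURCE		/* SEEK_DATA, SEEK_HOLE */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data, hole = 0;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Print every data extent as a half-open byte range. */
	for (;;) {
		data = lseek(fd, hole, SEEK_DATA);
		if (data < 0)
			break;		/* ENXIO: no more data */
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data, (long long)hole);
	}

	close(fd);
	return 0;
}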
/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * sb_start_pagefault(vfs, freeze)
 *   i_mmaplock (XFS - truncate serialisation)
 *     i_lock (XFS - extent map serialisation)
 */

/*
 * mmap()d file has taken write protection fault and is being made writable. We
 * can set the page state up correctly for a writable page, which means we can
 * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
 * conversion correctly.
 */
xfs_filemap_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
	} else {
		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vma, vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		/*
		 * We do not want to trigger unwritten extent conversion on read
		 * faults - that is unnecessary overhead and would also require
		 * changes to xfs_get_blocks_direct() to map an unwritten extent
		 * ioend for conversion on read-only mappings.
		 */
		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
	} else
		ret = filemap_fault(vma, vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults. There is no ->pmd_mkwrite callout for huge
 * pages, so we have a single function here to handle both cases. @flags
 * carries the information on the type of fault occurring.
 */
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);

		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);
/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it's needed to serialise against
 * truncate similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
 * to ensure we serialise the fault barrier in place.
 */
xfs_filemap_pfn_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_pfn_mkwrite(vma, vmf);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.pmd_fault	= xfs_filemap_pmd_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};
	struct vm_area_struct	*vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
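/*
 * Illustrative, standalone userspace sketch (not part of this file): a shared
 * writable mapping whose first store takes a write-protection fault and ends
 * up in the ->page_mkwrite handler above, letting the filesystem reserve
 * space and set the page up for delalloc accounting. The file name and the
 * one-page size are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const size_t len = 4096;	/* assumed page size */
	char *map;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDWR | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ftruncate(fd, len) < 0) {	/* keep the page inside i_size */
		perror("ftruncate");
		close(fd);
		return 1;
	}

	map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	/* The first write to the clean page triggers the page_mkwrite path. */
	memcpy(map, "written through mmap", 21);

	if (msync(map, len, MS_SYNC) < 0)	/* write the dirty page back */
		perror("msync");

	munmap(map, len);
	close(fd);
	return 0;
}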
const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};