/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"

#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
static const struct vm_operations_struct xfs_file_vm_ops;
/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		inode_lock(VFS_I(ip));
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		inode_unlock(VFS_I(ip));
}
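/*
 * Illustrative sketch of how the IO paths below use these wrappers: take the
 * combined VFS/XFS locks shared for the common case, cycle up to exclusive
 * only when the page cache must be invalidated, then demote again before
 * issuing IO. The do_io() call stands in for the actual IO submission and is
 * hypothetical.
 *
 *	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
 *	if (mapping->nrpages) {
 *		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 *		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
 *		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
 *	}
 *	do_io();
 *	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 */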
/*
 * xfs_iozero clears the specified range supplied via the page cache (except in
 * the DAX case). Writes through the page cache will allocate blocks over holes,
 * though the callers usually map the holes first and avoid them. If a block is
 * not completely zeroed, then it will be read from disk before being partially
 * zeroed.
 *
 * In the DAX case, we can just directly write to the underlying pages. This
 * will not allocate blocks, but will avoid holes and unwritten extents and so
 * not do unnecessary work.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode			*/
	loff_t			pos,	/* offset in file		*/
	size_t			count)	/* size of data to zero		*/
{
	struct page		*page;
	struct address_space	*mapping;
	int			status = 0;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void	*fsdata;

		offset = (pos & (PAGE_SIZE - 1));	/* Within page */
		bytes = PAGE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		if (IS_DAX(VFS_I(ip))) {
			status = dax_zero_page_range(VFS_I(ip), pos, bytes,
						     xfs_get_blocks_direct);
			if (status)
				break;
		} else {
			status = pagecache_write_begin(NULL, mapping, pos, bytes,
						AOP_FLAG_UNINTERRUPTIBLE,
						&page, &fsdata);
			if (status)
				break;

			zero_user(page, offset, bytes);

			status = pagecache_write_end(NULL, mapping, pos, bytes,
						bytes, page, fsdata);
			WARN_ON(status <= 0); /* can't return less than zero! */
			status = 0;
		}
		pos += bytes;
		count -= bytes;
	} while (count);

	return status;
}
int
xfs_update_prealloc_flags(
	struct xfs_inode	*ip,
	enum xfs_prealloc_flags	flags)
{
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
			0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	if (!(flags & XFS_PREALLOC_INVISIBLE)) {
		VFS_I(ip)->i_mode &= ~S_ISUID;
		if (VFS_I(ip)->i_mode & S_IXGRP)
			VFS_I(ip)->i_mode &= ~S_ISGID;
		xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	}

	if (flags & XFS_PREALLOC_SET)
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
	if (flags & XFS_PREALLOC_CLEAR)
		ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	if (flags & XFS_PREALLOC_SYNC)
		xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp);
}
/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache the device used for file data
		 * first.  This is to ensure newly written file data make
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have to
	 * flush the log up to the latest LSN that touched the inode. If we have
	 * concurrent fsync/fdatasync() calls, we need them to all block on the
	 * log force before we clear the ili_fsync_fields field. This ensures
	 * that we don't get a racing sync operation that does not wait for the
	 * metadata to hit the journal before returning. If we race with
	 * clearing the ili_fsync_fields, then all that will happen is the log
	 * force will do nothing as the lsn will already be on disk. We can't
	 * race with setting ili_fsync_fields because that is done under
	 * XFS_ILOCK_EXCL, and that can't happen because we hold the lock shared
	 * until after the ili_fsync_fields is cleared.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}

	if (lsn) {
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);
		ip->i_itemp->ili_fsync_fields = 0;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}
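/*
 * Illustrative user-space sketch of how the fsync paths above are driven:
 * fdatasync() passes datasync == 1 to ->fsync, which lets xfs_file_fsync()
 * skip the log force when only timestamps are dirty, while fsync() always
 * waits for logged inode metadata as well. The helper name is hypothetical
 * and error handling is trimmed for exposition.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int write_durably(int fd, const void *buf, size_t len)
 *	{
 *		if (write(fd, buf, len) != (ssize_t)len)
 *			return -1;
 *		return fdatasync(fd);
 *	}
 */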
STATIC ssize_t
xfs_file_dio_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct address_space	*mapping = iocb->ki_filp->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	loff_t			isize = i_size_read(inode);
	size_t			count = iov_iter_count(to);
	struct iov_iter		data;
	struct xfs_buftarg	*target;
	ssize_t			ret = 0;

	trace_xfs_file_direct_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	if (XFS_IS_REALTIME_INODE(ip))
		target = ip->i_mount->m_rtdev_targp;
	else
		target = ip->i_mount->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask) {
		if (iocb->ki_pos == isize)
			return 0;
		return -EINVAL;
	}

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock for direct
	 * IO, we effectively serialise all new concurrent read IO to this file
	 * and block it behind IO that is currently in progress because IO in
	 * progress holds the IO lock shared. We only need to hold the lock
	 * exclusive to blow away the page cache, so only take lock exclusively
	 * if the page cache needs invalidation. This allows the normal direct
	 * IO case of no page cache pages to proceed concurrently without
	 * serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if (mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		/*
		 * The generic dio code only flushes the range of the particular
		 * I/O. Because we take an exclusive lock here, this whole
		 * sequence is considerably more expensive for us. This has a
		 * noticeable performance impact for any file with cached pages,
		 * even when outside of the range of the particular I/O.
		 *
		 * Hence, amortize the cost of the lock against a full file
		 * flush and reduce the chances of repeated iolock cycles going
		 * forward.
		 */
		if (mapping->nrpages) {
			ret = filemap_write_and_wait(mapping);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2(mapping);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	data = *to;
	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
			xfs_get_blocks_direct, NULL, NULL, 0);
	if (ret > 0) {
		iocb->ki_pos += ret;
		iov_iter_advance(to, ret);
	}
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}
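/*
 * Illustrative user-space sketch of a read that passes the sector alignment
 * check above: with O_DIRECT, the file offset, buffer address and length must
 * all be aligned to the device logical sector size. A 512 byte alignment is
 * assumed here for exposition; the helper name is hypothetical and error
 * handling is trimmed.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	ssize_t dio_read(const char *path, void **bufp, size_t len)
 *	{
 *		int fd = open(path, O_RDONLY | O_DIRECT);
 *		ssize_t ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		if (posix_memalign(bufp, 512, len)) {
 *			close(fd);
 *			return -1;
 *		}
 *		ret = pread(fd, *bufp, len, 0);
 *		close(fd);
 *		return ret;
 *	}
 */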
static noinline ssize_t
xfs_file_dax_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct address_space	*mapping = iocb->ki_filp->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct iov_iter		data = *to;
	size_t			count = iov_iter_count(to);
	ssize_t			ret = 0;

	trace_xfs_file_dax_read(ip, count, iocb->ki_pos);

	if (!count)
		return 0; /* skip atime */

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct, NULL, 0);
	if (ret > 0) {
		iocb->ki_pos += ret;
		iov_iter_advance(to, ret);
	}
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	file_accessed(iocb->ki_filp);
	return ret;
}
STATIC ssize_t
xfs_file_buffered_aio_read(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct xfs_inode	*ip = XFS_I(file_inode(iocb->ki_filp));
	ssize_t			ret;

	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_read_iter(iocb, to);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);

	return ret;
}
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct inode		*inode = file_inode(iocb->ki_filp);
	struct xfs_mount	*mp = XFS_I(inode)->i_mount;
	ssize_t			ret = 0;

	XFS_STATS_INC(mp, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_read(iocb, to);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_read(iocb, to);
	else
		ret = xfs_file_buffered_aio_read(iocb, to);

	if (ret > 0)
		XFS_STATS_ADD(mp, xs_read_bytes, ret);
	return ret;
}
STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	ssize_t			ret;

	XFS_STATS_INC(ip->i_mount, xs_read_calls);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	trace_xfs_file_splice_read(ip, count, *ppos);

	/*
	 * DAX inodes cannot use the page cache for splice, so we have to push
	 * them through the VFS IO path. This means it goes through
	 * ->read_iter, which for us takes the XFS_IOLOCK_SHARED. Hence we
	 * cannot lock the splice operation at this level for DAX inodes.
	 */
	if (IS_DAX(VFS_I(ip))) {
		ret = default_file_splice_read(infilp, ppos, pipe, count,
					       flags);
		goto out;
	}

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
out:
	if (ret > 0)
		XFS_STATS_ADD(ip->i_mount, xs_read_bytes, ret);
	return ret;
}
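/*
 * Illustrative user-space sketch of what drives this path: splice(2) moves
 * file data into a pipe without a user-space copy, ending up in the
 * ->splice_read method above (generic_file_splice_read() for non-DAX
 * inodes). The helper name is hypothetical and error handling is trimmed.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int file_to_pipe(int file_fd, int pipe_wr, size_t len)
 *	{
 *		loff_t off = 0;
 *
 *		while (len > 0) {
 *			ssize_t n = splice(file_fd, &off, pipe_wr, NULL,
 *					   len, SPLICE_F_MOVE);
 *			if (n <= 0)
 *				return -1;
 *			len -= n;
 *		}
 *		return 0;
 *	}
 */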
/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize,
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	*did_zeroing = true;
	return xfs_iozero(ip, isize, zero_len);
}
/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		*did_zeroing = true;
		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}
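/*
 * The user-visible contract of the zeroing above: extending a file and then
 * reading the gap between the old and new EOF must return zeroes, never
 * stale disk contents. An illustrative user-space sketch (the helper name is
 * hypothetical and error handling is trimmed): write one byte at offset 0,
 * then one byte at 1MB to extend the file; a read anywhere in the gap must
 * see only zeroes.
 *
 *	#include <unistd.h>
 *
 *	void extend_and_check(int fd, char *buf, size_t len)
 *	{
 *		char c = 'x';
 *
 *		pwrite(fd, &c, 1, 0);
 *		pwrite(fd, &c, 1, 1024 * 1024);
 *		pread(fd, buf, len, 4096);
 *	}
 */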
/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct kiocb		*iocb,
	struct iov_iter		*from,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			error = 0;
	size_t			count = iov_iter_count(from);
	bool			drained_dio = false;

restart:
	error = generic_write_checks(iocb, from);
	if (error <= 0)
		return error;

	error = xfs_break_layouts(inode, iolock, true);
	if (error)
		return error;

	/* For changing security info in file_remove_privs() we need i_mutex */
	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
		xfs_rw_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, *iolock);
		goto restart;
	}

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive which implies
	 * having to redo all checks before.
	 *
	 * We need to serialise against EOF updates that occur in IO
	 * completions here. We want to make sure that nobody is changing the
	 * size while we do this check until we have placed an IO barrier (i.e.
	 * hold the XFS_IOLOCK_EXCL) that prevents new IO from being dispatched.
	 * The spinlock effectively forms a memory barrier once we have the
	 * XFS_IOLOCK_EXCL so we are guaranteed to see the latest EOF value
	 * and hence be able to correctly determine if we need to run zeroing.
	 */
	spin_lock(&ip->i_flags_lock);
	if (iocb->ki_pos > i_size_read(inode)) {
		bool	zero = false;

		spin_unlock(&ip->i_flags_lock);
		if (!drained_dio) {
			if (*iolock == XFS_IOLOCK_SHARED) {
				xfs_rw_iunlock(ip, *iolock);
				*iolock = XFS_IOLOCK_EXCL;
				xfs_rw_ilock(ip, *iolock);
				iov_iter_reexpand(from, count);
			}
			/*
			 * We now have an IO submission barrier in place, but
			 * AIO can do EOF updates during IO completion and hence
			 * we now need to wait for all of them to drain. Non-AIO
			 * DIO will have drained before we are given the
			 * XFS_IOLOCK_EXCL, and so for most cases this wait is a
			 * no-op.
			 */
			inode_dio_wait(inode);
			drained_dio = true;
			goto restart;
		}
		error = xfs_zero_eof(ip, iocb->ki_pos, i_size_read(inode), &zero);
		if (error)
			return error;
	} else
		spin_unlock(&ip->i_flags_lock);

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	if (!IS_NOSEC(inode))
		return file_remove_privs(file);
	return 0;
}
/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky to
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block. In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block. This is currently implemented by
 * hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			end;
	struct iov_iter		data;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless there is page cache
	 * that needs to be invalidated or unaligned IO is being executed. We
	 * don't need to consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidate after we got
	 * the iolock to protect against other threads adding new pages while
	 * we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;
	count = iov_iter_count(from);
	end = iocb->ki_pos + count - 1;

	/*
	 * See xfs_file_dio_aio_read() for why we do a full-file flush here.
	 */
	if (mapping->nrpages) {
		ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
		if (ret)
			goto out;
		/*
		 * Invalidate whole pages. This can return an error if we fail
		 * to invalidate a page, but this should never happen on XFS.
		 * Warn if it does fail.
		 */
		ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos);

	data = *from;
	ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
			xfs_get_blocks_direct, xfs_end_io_direct_write,
			NULL, DIO_ASYNC_EXTEND);

	/* see generic_file_direct_write() for why this is necessary */
	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      iocb->ki_pos >> PAGE_SHIFT,
					      end >> PAGE_SHIFT);
	}

	if (ret > 0) {
		iocb->ki_pos += ret;
		iov_iter_advance(from, ret);
	}
out:
	xfs_rw_iunlock(ip, iolock);

	/*
	 * No fallback to buffered IO on errors for XFS, direct IO will either
	 * complete fully or fail.
	 */
	ASSERT(ret < 0 || ret == count);
	return ret;
}
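/*
 * Illustrative user-space consequence of the locking policy above: an
 * O_DIRECT write whose offset and length are filesystem-block aligned can
 * run under the shared iolock, concurrently with other direct IO, while a
 * sub-block write forces the exclusive lock plus inode_dio_wait(). A sketch,
 * assuming a 4096 byte filesystem block size and an fd opened with O_DIRECT;
 * the helper name is hypothetical and error handling is trimmed.
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	ssize_t dio_write_aligned(int fd, size_t len, off_t off)
 *	{
 *		void *buf;
 *		ssize_t ret;
 *
 *		if (posix_memalign(&buf, 4096, len))
 *			return -1;
 *		memset(buf, 0xab, len);
 *		ret = pwrite(fd, buf, len, off);
 *		free(buf);
 *		return ret;
 *	}
 */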
static noinline ssize_t
xfs_file_dax_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct address_space	*mapping = iocb->ki_filp->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	struct iov_iter		data;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((iocb->ki_pos & mp->m_blockmask) ||
	    ((iocb->ki_pos + iov_iter_count(from)) & mp->m_blockmask)) {
		unaligned_io = 1;
		iolock = XFS_IOLOCK_EXCL;
	} else if (mapping->nrpages) {
		iolock = XFS_IOLOCK_EXCL;
	} else {
		iolock = XFS_IOLOCK_SHARED;
	}
	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/*
	 * Yes, even DAX files can have page cache attached to them:  A zeroed
	 * page is inserted into the pagecache when we have to serve a write
	 * fault on a hole.  It should never be dirtied and can simply be
	 * dropped from the pagecache once we get real data for the page.
	 */
	if (mapping->nrpages) {
		ret = invalidate_inode_pages2(mapping);
		WARN_ON_ONCE(ret);
	}

	if (iolock == XFS_IOLOCK_EXCL && !unaligned_io) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_dax_write(ip, iov_iter_count(from), iocb->ki_pos);

	data = *from;
	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct,
			xfs_end_io_direct_write, 0);
	if (ret > 0) {
		iocb->ki_pos += ret;
		iov_iter_advance(from, ret);
	}
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
	if (ret)
		goto out;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

write_retry:
	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
	ret = generic_perform_write(file, from, iocb->ki_pos);
	if (likely(ret >= 0))
		iocb->ki_pos += ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(ip->i_mount, xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (IS_DAX(inode))
		ret = xfs_file_dax_write(iocb, from);
	else if (iocb->ki_flags & IOCB_DIRECT)
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	if (ret > 0) {
		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		ret = generic_write_sync(iocb, ret);
	}
	return ret;
}
#define	XFS_FALLOC_FL_SUPPORTED						\
		(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |		\
		 FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE |	\
		 FALLOC_FL_INSERT_RANGE)
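/*
 * Illustrative user-space sketch of exercising the supported modes above via
 * fallocate(2); error handling is trimmed for exposition. Note that the
 * collapse and insert range modes additionally require block-aligned offset
 * and length, which xfs_file_fallocate() below enforces.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <linux/falloc.h>
 *
 *	int punch_hole(int fd, off_t offset, off_t len)
 *	{
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *				 offset, len);
 *	}
 *
 *	int preallocate(int fd, off_t offset, off_t len)
 *	{
 *		return fallocate(fd, 0, offset, len);
 *	}
 */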
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	long			error;
	enum xfs_prealloc_flags	flags = 0;
	uint			iolock = XFS_IOLOCK_EXCL;
	loff_t			new_size = 0;
	bool			do_file_insert = false;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~XFS_FALLOC_FL_SUPPORTED)
		return -EOPNOTSUPP;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock, false);
	if (error)
		goto out_unlock;

	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
	iolock |= XFS_MMAPLOCK_EXCL;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * There is no need to overlap collapse range with EOF,
		 * in which case it is effectively a truncate operation
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_INSERT_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		new_size = i_size_read(inode) + len;
		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/* check the new inode size does not wrap through zero */
		if (new_size > inode->i_sb->s_maxbytes) {
			error = -EFBIG;
			goto out_unlock;
		}

		/* Offset should be less than i_size */
		if (offset >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}
		do_file_insert = true;
	} else {
		flags |= XFS_PREALLOC_SET;

		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		flags |= XFS_PREALLOC_SYNC;

	error = xfs_update_prealloc_flags(ip, flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
		if (error)
			goto out_unlock;
	}

	/*
	 * Perform hole insertion now that the file size has been
	 * updated so that if we crash during the operation we don't
	 * leave shifted extents past EOF and hence losing access to
	 * the data that is contained within them.
	 */
	if (do_file_insert)
		error = xfs_insert_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}
STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}
STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}
STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}
/*
 * This type is designed to indicate the type of offset we would like
 * to search from page cache for xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}
/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset is used to tell where we start to search from the
 * page cache.  Map is used to figure out the end points of the range to
 * lookup pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * the offset argument unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into the given range.  If we are searching
		 * holes and this is the first time we got into the loop, it
		 * means that the given offset landed in a hole, return it.
		 *
		 * If we have already stepped through some block buffers to
		 * find holes but they all contained data, the last offset is
		 * already updated and points to the end of the last mapped
		 * page; if it does not reach the endpoint of the search, that
		 * means there should be a hole between them.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least we found one page.  If this is the first time we
		 * step into the loop, and if the first page index offset is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of range.
			 * If the current offset does not reach the end of
			 * the specified search range, there should be a hole
			 * between them.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated(page->mapping == NULL).
			 * We can freely skip it and proceed to check the next
			 * page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the start
				 * point to search if this is the first time to
				 * come here.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching data but nothing was found,
			 * or searching a hole but found a data buffer.  In
			 * either case, probably the next page contains the
			 * desired things, so update the last offset to it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * The number of returned pages is less than our desired count,
		 * so the search is done.  In this case, nothing was found for
		 * searching data, but we found a hole behind the last offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}
/*
 * caller must lock inode with xfs_ilock_data_map_shared,
 * can we craft an appropriate ASSERT?
 *
 * end is because the VFS-level lseek interface is defined such that any
 * offset past i_size shall return -ENXIO, but we use this for quota code
 * which does not maintain i_size, and we want to SEEK_DATA past i_size.
 */
STATIC loff_t
__xfs_seek_hole_data(
	struct inode		*inode,
	loff_t			start,
	loff_t			end,
	int			whence)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		lastbno;
	int			error;

	if (start >= end) {
		error = -ENXIO;
		goto out_error;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	lastbno = XFS_B_TO_FSB(mp, end);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, lastbno - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_error;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_error;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent, try to search
			 * for hole or data from page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
							&offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be found
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found, proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= end) {
			if (whence == SEEK_HOLE) {
				offset = end;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_error;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to
	 * page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, end);

	return offset;

out_error:
	return error;
}
STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	uint			lock;
	loff_t			offset, end;
	int			error = 0;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	end = i_size_read(inode);
	offset = __xfs_seek_hole_data(inode, start, end, whence);
	if (offset < 0) {
		error = offset;
		goto out_unlock;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}
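/*
 * Illustrative user-space sketch of walking a sparse file with the
 * SEEK_HOLE/SEEK_DATA support above; the helper and callback are
 * hypothetical and error handling is trimmed for exposition. lseek()
 * returns -1 with ENXIO once no further data exists before EOF.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	void walk_data_extents(int fd, off_t end,
 *			       void (*cb)(off_t start, off_t len))
 *	{
 *		off_t data = 0, hole;
 *
 *		while ((data = lseek(fd, data, SEEK_DATA)) >= 0 && data < end) {
 *			hole = lseek(fd, data, SEEK_HOLE);
 *			if (hole < 0)
 *				break;
 *			cb(data, hole - data);
 *			data = hole;
 *		}
 *	}
 */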
/*
 * Locking for serialisation of IO during page faults. This results in a lock
 * ordering of:
 *
 * mmap_sem (MM)
 *   sb_start_pagefault(vfs, freeze)
 *     i_mmaplock (XFS - truncate serialisation)
 *       page_lock (MM)
 *         i_lock (XFS - extent map serialisation)
 */

/*
 * mmap()d file has taken write protection fault and is being made writable. We
 * can set the page state up correctly for a writable page, which means we can
 * do correct delalloc accounting (ENOSPC checking!) and unwritten extent
 * conversion.
 */
STATIC int
xfs_filemap_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_page_mkwrite(XFS_I(inode));

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);
	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (IS_DAX(inode)) {
		ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
	} else {
		ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
		ret = block_page_mkwrite_return(ret);
	}

	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);

	return ret;
}
STATIC int
xfs_filemap_fault(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	int			ret;

	trace_xfs_filemap_fault(XFS_I(inode));

	/* DAX can shortcut the normal fault path on write faults! */
	if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
		return xfs_filemap_page_mkwrite(vma, vmf);

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	if (IS_DAX(inode)) {
		/*
		 * we do not want to trigger unwritten extent conversion on read
		 * faults - that is unnecessary overhead and would also require
		 * changes to xfs_get_blocks_direct() to map unwritten extent
		 * ioend for conversion on read-only mappings.
		 */
		ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
	} else
		ret = filemap_fault(vma, vmf);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	return ret;
}
/*
 * Similar to xfs_filemap_fault(), the DAX fault path can call into here on
 * both read and write faults. There is no ->pmd_mkwrite callout for huge
 * pages, so we have a single function here to handle both cases. @flags
 * carries the information on the type of fault occurring.
 */
STATIC int
xfs_filemap_pmd_fault(
	struct vm_area_struct	*vma,
	unsigned long		addr,
	pmd_t			*pmd,
	unsigned int		flags)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret;

	if (!IS_DAX(inode))
		return VM_FAULT_FALLBACK;

	trace_xfs_filemap_pmd_fault(ip);

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(inode->i_sb);
		file_update_time(vma->vm_file);
	}

	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
	ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);

	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(inode->i_sb);

	return ret;
}
/*
 * pfn_mkwrite was originally intended to ensure we capture time stamp
 * updates on write faults. In reality, it's needed to serialise against
 * truncate similar to page_mkwrite. Hence we cycle the XFS_MMAPLOCK_SHARED
 * to ensure we serialise the fault barrier in place.
 */
static int
xfs_filemap_pfn_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	struct inode		*inode = file_inode(vma->vm_file);
	struct xfs_inode	*ip = XFS_I(inode);
	int			ret = VM_FAULT_NOPAGE;
	loff_t			size;

	trace_xfs_filemap_pfn_mkwrite(ip);

	sb_start_pagefault(inode->i_sb);
	file_update_time(vma->vm_file);

	/* check if the faulting page hasn't raced with truncate */
	xfs_ilock(ip, XFS_MMAPLOCK_SHARED);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		ret = VM_FAULT_SIGBUS;
	else if (IS_DAX(inode))
		ret = dax_pfn_mkwrite(vma, vmf);
	xfs_iunlock(ip, XFS_MMAPLOCK_SHARED);
	sb_end_pagefault(inode->i_sb);
	return ret;
}
static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= xfs_filemap_fault,
	.pmd_fault	= xfs_filemap_pmd_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	file_accessed(filp);
	vma->vm_ops = &xfs_file_vm_ops;
	if (IS_DAX(file_inode(filp)))
		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
	return 0;
}
const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};
const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate_shared	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};