/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_icache.h"

#include <linux/aio.h>
#include <linux/dcache.h>
#include <linux/falloc.h>
#include <linux/pagevec.h>
static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 * xfs_iozero
 *
 *	xfs_iozero clears the specified range of the buffer supplied,
 *	and marks all the affected blocks as valid and modified.  If
 *	an affected block is not allocated, it will be allocated.  If
 *	an affected block is not completely overwritten, and is not
 *	valid before the operation, it will be read from disk before
 *	being partially zeroed.
 */
int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode */
	loff_t			pos,	/* offset in file */
	size_t			count)	/* size of data to zero */
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return status;
}

/*
 * Fsync operations on directories are much simpler than on regular files,
 * as there is no file data to flush, and thus also no need for explicit
 * cache flush operations, and there are no non-transaction metadata updates
 * on directories either.
 */
STATIC int
xfs_dir_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct xfs_inode	*ip = XFS_I(file->f_mapping->host);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_lsn_t		lsn = 0;

	trace_xfs_dir_fsync(ip);

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip))
		lsn = ip->i_itemp->ili_last_lsn;
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (!lsn)
		return 0;
	return _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, NULL);
}

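/*
 * Flush all dirty data in the given range to disk, then, unless this is a
 * datasync and only timestamps changed, force the log up to the last LSN
 * that touched the inode. Volatile write caches on the data and log devices
 * are flushed as required so the data is truly stable when we return.
 */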
STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	int			error = 0;
	int			log_flushed = 0;
	xfs_lsn_t		lsn = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This ensures newly written file data makes it to
		 * disk before logging the new inode size in case of an
		 * extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * All metadata updates are logged, which means that we just have
	 * to flush the log up to the latest LSN that touched the inode.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_ipincount(ip)) {
		if (!datasync ||
		    (ip->i_itemp->ili_fields & ~XFS_ILOG_TIMESTAMP))
			lsn = ip->i_itemp->ili_last_lsn;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	if (lsn)
		error = _xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed);

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return error;
}

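/*
 * Read from a file through the page cache or, for O_DIRECT, straight from
 * the backing device. Direct IO must be aligned to the device logical
 * sector size, and any cached pages over the range are written back and
 * invalidated first so the direct read sees stable on-disk data.
 */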
STATIC ssize_t
xfs_file_read_iter(
	struct kiocb		*iocb,
	struct iov_iter		*to)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = iov_iter_count(to);
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	loff_t			pos = iocb->ki_pos;

	XFS_STATS_INC(xs_read_calls);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= XFS_IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (unlikely(ioflags & XFS_IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		/* DIO must be aligned to device logical sector size */
		if ((pos | size) & target->bt_logical_sectormask) {
			if (pos == i_size_read(inode))
				return 0;
			return -EINVAL;
		}
	}

	n = mp->m_super->s_maxbytes - pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here. If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared. We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = filemap_write_and_wait_range(
							VFS_I(ip)->i_mapping,
							pos, pos + size - 1);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}

			/*
			 * Invalidate whole pages. This can return an error if
			 * we fail to invalidate a page, but this should never
			 * happen on XFS. Warn if it does fail.
			 */
			ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					pos >> PAGE_CACHE_SHIFT,
					(pos + size - 1) >> PAGE_CACHE_SHIFT);
			WARN_ON_ONCE(ret);
			ret = 0;
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, pos, ioflags);

	ret = generic_file_read_iter(iocb, to);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= XFS_IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last block of the
 * file that is beyond the EOF.  We do this since the size is being increased
 * without writing anything to that block and we don't want to read the
 * garbage on the disk.
 */
STATIC int				/* error (negative) */
xfs_zero_last_block(
	struct xfs_inode	*ip,
	xfs_fsize_t		offset,
	xfs_fsize_t		isize)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		last_fsb = XFS_B_TO_FSBT(mp, isize);
	int			zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	int			zero_len;
	int			nimaps = 1;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	ASSERT(nimaps > 0);

	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK)
		return 0;

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	return xfs_iozero(ip, isize, zero_len);
}

/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (negative) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize)		/* current inode size */
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		start_zero_fsb;
	xfs_fileoff_t		end_zero_fsb;
	xfs_fileoff_t		zero_count_fsb;
	xfs_fileoff_t		last_fsb;
	xfs_fileoff_t		zero_off;
	xfs_fsize_t		zero_len;
	int			nimaps;
	int			error = 0;
	struct xfs_bmbt_irec	imap;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 *
	 * We only zero a part of that block so it is handled specially.
	 */
	if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
		error = xfs_zero_last_block(ip, offset, isize);
		if (error)
			return error;
	}

	/*
	 * Calculate the range between the new size and the old where blocks
	 * needing to be zeroed may exist.
	 *
	 * To get the block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back to a block
	 * boundary.  We subtract 1 in case the size is exactly on a block
	 * boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;

		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
					  &imap, &nimaps, 0);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 */
		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error)
			return error;

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
	}

	return 0;
}

/*
 * Common pre-write limit and setup checks.
 *
 * Called with the iolock held either shared or exclusive according to
 * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
 * if called for a direct write beyond i_size.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	int			error = 0;

restart:
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error)
		return error;

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.  If zeroing is needed and we are currently holding the
	 * iolock shared, we need to update it to exclusive, which implies
	 * having to redo all of the checks above.
	 */
	if (*pos > i_size_read(inode)) {
		if (*iolock == XFS_IOLOCK_SHARED) {
			xfs_rw_iunlock(ip, *iolock);
			*iolock = XFS_IOLOCK_EXCL;
			xfs_rw_ilock(ip, *iolock);
			goto restart;
		}
		error = xfs_zero_eof(ip, *pos, i_size_read(inode));
		if (error)
			return error;
	}

	/*
	 * Updating the timestamps will grab the ilock again from
	 * xfs_fs_dirty_inode, so we have to call it after dropping the
	 * lock above.  Eventually we should look into a way to avoid
	 * the pointless lock roundtrip.
	 */
	if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
		error = file_update_time(file);
		if (error)
			return error;
	}

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the
 * tricky-to-follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. inode_dio_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	int			unaligned_io = 0;
	int			iolock;
	size_t			count = iov_iter_count(from);
	loff_t			pos = iocb->ki_pos;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	/* DIO must be aligned to device logical sector size */
	if ((pos | count) & target->bt_logical_sectormask)
		return -EINVAL;

	/* "unaligned" here means not aligned to a filesystem block */
	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	/*
	 * We don't need to take an exclusive lock unless the page cache needs
	 * to be invalidated or unaligned IO is being executed. We don't need
	 * to consider the EOF extension case here because
	 * xfs_file_aio_write_checks() will relock the inode as necessary for
	 * EOF zeroing cases and fill out the new inode size as appropriate.
	 */
	if (unaligned_io || mapping->nrpages)
		iolock = XFS_IOLOCK_EXCL;
	else
		iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, iolock);

	/*
	 * Recheck if there are cached pages that need invalidating after we
	 * got the iolock to protect against other threads adding new pages
	 * while we were waiting for the iolock.
	 */
	if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, iolock);
		iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, iolock);
	}

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;
	iov_iter_truncate(from, count);

	if (mapping->nrpages) {
		ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
						   pos, pos + count - 1);
		if (ret)
			goto out;
		/*
		 * Invalidate whole pages. This can return an error if
		 * we fail to invalidate a page, but this should never
		 * happen on XFS. Warn if it does fail.
		 */
		ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
					pos >> PAGE_CACHE_SHIFT,
					(pos + count - 1) >> PAGE_CACHE_SHIFT);
		WARN_ON_ONCE(ret);
		ret = 0;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain;
	 * otherwise demote the lock if we had to flush cached pages.
	 */
	if (unaligned_io)
		inode_dio_wait(inode);
	else if (iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, from, pos);

out:
	xfs_rw_iunlock(ip, iolock);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

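/*
 * Handle buffered writes under the exclusive iolock. If the write runs out
 * of quota or space, lingering speculative preallocations are freed (and,
 * for ENOSPC, all dirty inodes are flushed) before the write is retried
 * once.
 */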
STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	int			iolock = XFS_IOLOCK_EXCL;
	loff_t			pos = iocb->ki_pos;
	size_t			count = iov_iter_count(from);

	xfs_rw_ilock(ip, iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
	if (ret)
		goto out;

	iov_iter_truncate(from, count);
	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_perform_write(file, from, pos);
	if (likely(ret >= 0))
		iocb->ki_pos = pos + ret;

	/*
	 * If we hit a space limit, try to free up some lingering preallocated
	 * space before returning an error. In the case of ENOSPC, first try to
	 * write back all dirty inodes to free up some of the excess reserved
	 * metadata space. This reduces the chances that the eofblocks scan
	 * waits on dirty mappings. Since xfs_flush_inodes() is serialized, this
	 * also behaves as a filter to prevent too many eofblocks scans from
	 * running at the same time.
	 */
	if (ret == -EDQUOT && !enospc) {
		enospc = xfs_inode_free_quota_eofblocks(ip);
		if (enospc)
			goto write_retry;
	} else if (ret == -ENOSPC && !enospc) {
		struct xfs_eofblocks eofb = {0};

		enospc = 1;
		xfs_flush_inodes(ip->i_mount);
		eofb.eof_scan_owner = ip->i_ino; /* for locking */
		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
		goto write_retry;
	}

	current->backing_dev_info = NULL;
out:
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

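/*
 * Top-level ->write_iter method: dispatch to the direct or buffered write
 * path, then handle O_SYNC/O_DSYNC semantics for whatever was written.
 */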
STATIC ssize_t
xfs_file_write_iter(
	struct kiocb		*iocb,
	struct iov_iter		*from)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	size_t			ocount = iov_iter_count(from);

	XFS_STATS_INC(xs_write_calls);

	if (ocount == 0)
		return 0;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, from);
	else
		ret = xfs_file_buffered_aio_write(iocb, from);

	if (ret > 0) {
		ssize_t		err;

		XFS_STATS_ADD(xs_write_bytes, ret);

		/* Handle various SYNC-type writes */
		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
		if (err < 0)
			ret = err;
	}
	return ret;
}

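/*
 * Preallocate, punch, collapse or zero a range of the file. The timestamp
 * and setuid/setgid updates are logged in their own transaction, and the
 * file size is updated afterwards if the operation extended or shrank the
 * file.
 */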
STATIC long
xfs_file_fallocate(
	struct file		*file,
	int			mode,
	loff_t			offset,
	loff_t			len)
{
	struct inode		*inode = file_inode(file);
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_trans	*tp;
	long			error;
	loff_t			new_size = 0;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
		     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
		return -EOPNOTSUPP;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	if (mode & FALLOC_FL_PUNCH_HOLE) {
		error = xfs_free_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
		unsigned blksize_mask = (1 << inode->i_blkbits) - 1;

		if (offset & blksize_mask || len & blksize_mask) {
			error = -EINVAL;
			goto out_unlock;
		}

		/*
		 * A collapse range must not reach or cross EOF;
		 * that case would effectively be a truncate operation.
		 */
		if (offset + len >= i_size_read(inode)) {
			error = -EINVAL;
			goto out_unlock;
		}

		new_size = i_size_read(inode) - len;

		error = xfs_collapse_file_space(ip, offset, len);
		if (error)
			goto out_unlock;
	} else {
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    offset + len > i_size_read(inode)) {
			new_size = offset + len;
			error = inode_newsize_ok(inode, new_size);
			if (error)
				goto out_unlock;
		}

		if (mode & FALLOC_FL_ZERO_RANGE)
			error = xfs_zero_file_space(ip, offset, len);
		else
			error = xfs_alloc_file_space(ip, offset, len,
						     XFS_BMAPI_PREALLOC);
		if (error)
			goto out_unlock;
	}

	tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_WRITEID);
	error = xfs_trans_reserve(tp, &M_RES(ip->i_mount)->tr_writeid, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto out_unlock;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	ip->i_d.di_mode &= ~S_ISUID;
	if (ip->i_d.di_mode & S_IXGRP)
		ip->i_d.di_mode &= ~S_ISGID;

	if (!(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE)))
		ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (file->f_flags & O_DSYNC)
		xfs_trans_set_sync(tp);
	error = xfs_trans_commit(tp, 0);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = xfs_setattr_size(ip, &iattr);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}

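/*
 * Refuse to open files too large for an open without O_LARGEFILE, and
 * anything at all on a shut-down filesystem.
 */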
STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_data_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_dir3_data_readahead(ip, 0, -1);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*file,
	struct dir_context *ctx)
{
	struct inode	*inode = file_inode(file);
	xfs_inode_t	*ip = XFS_I(inode);
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer we read
	 * into down to the filesystem.  With the filldir concept it's not
	 * needed for correct information, but the XFS dir2 leaf code wants
	 * an estimate of the buffer size to calculate its readahead window
	 * and to size the buffers used for mapping to physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	return xfs_readdir(ip, ctx, bufsize);
}

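/*
 * Wire up the XFS vm_operations at mmap() time so that write faults go
 * through xfs_vm_page_mkwrite and get correct delalloc space accounting.
 */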
STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;

	file_accessed(filp);
	return 0;
}

/*
 * An mmap()ed file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!)
 * and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

/*
 * This enum indicates which type of offset we are searching the page cache
 * for in xfs_seek_hole_data().
 */
enum {
	HOLE_OFF = 0,
	DATA_OFF,
};

/*
 * Lookup the desired type of offset from the given page.
 *
 * On success, return true and the offset argument will point to the
 * start of the region that was found.  Otherwise this function will
 * return false and keep the offset argument unchanged.
 */
STATIC bool
xfs_lookup_buffer_offset(
	struct page		*page,
	loff_t			*offset,
	unsigned int		type)
{
	loff_t			lastoff = page_offset(page);
	bool			found = false;
	struct buffer_head	*bh, *head;

	bh = head = page_buffers(page);
	do {
		/*
		 * Unwritten extents that have data in the page
		 * cache covering them can be identified by the
		 * BH_Unwritten state flag.  Pages with multiple
		 * buffers might have a mix of holes, data and
		 * unwritten extents - any buffer with valid
		 * data in it should have BH_Uptodate flag set
		 * on it.
		 */
		if (buffer_unwritten(bh) ||
		    buffer_uptodate(bh)) {
			if (type == DATA_OFF)
				found = true;
		} else {
			if (type == HOLE_OFF)
				found = true;
		}

		if (found) {
			*offset = lastoff;
			break;
		}
		lastoff += bh->b_size;
	} while ((bh = bh->b_this_page) != head);

	return found;
}

/*
 * This routine is called to find out and return a data or hole offset
 * from the page cache for unwritten extents according to the desired
 * type for xfs_seek_hole_data().
 *
 * The argument offset tells us where to start searching the page cache.
 * @map is used to figure out the end points of the range in which to
 * look up pages.
 *
 * Return true if the desired type of offset was found, and the argument
 * offset is filled with that address.  Otherwise, return false and keep
 * offset unchanged.
 */
STATIC bool
xfs_find_get_desired_pgoff(
	struct inode		*inode,
	struct xfs_bmbt_irec	*map,
	unsigned int		type,
	loff_t			*offset)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct pagevec		pvec;
	pgoff_t			index;
	pgoff_t			end;
	loff_t			endoff;
	loff_t			startoff = *offset;
	loff_t			lastoff = startoff;
	bool			found = false;

	pagevec_init(&pvec, 0);

	index = startoff >> PAGE_CACHE_SHIFT;
	endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount);
	end = endoff >> PAGE_CACHE_SHIFT;
	do {
		int		want;
		unsigned	nr_pages;
		unsigned int	i;

		want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
		nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
					  want);
		/*
		 * No page mapped into the given range.  If we are searching
		 * for holes and this is the first pass through the loop, the
		 * given offset landed in a hole, so return it.
		 *
		 * If we have already stepped through some block buffers and
		 * they all contained data, the last offset points at the end
		 * of the last mapped page; if it does not reach the end of
		 * the search range, there must be a hole in between.
		 */
		if (nr_pages == 0) {
			/* Data search found nothing */
			if (type == DATA_OFF)
				break;

			ASSERT(type == HOLE_OFF);
			if (lastoff == startoff || lastoff < endoff) {
				found = true;
				*offset = lastoff;
			}
			break;
		}

		/*
		 * At least one page was found.  If this is the first pass
		 * through the loop and the index of the first page is
		 * greater than the given search offset, a hole was found.
		 */
		if (type == HOLE_OFF && lastoff == startoff &&
		    lastoff < page_offset(pvec.pages[0])) {
			found = true;
			break;
		}

		for (i = 0; i < nr_pages; i++) {
			struct page	*page = pvec.pages[i];
			loff_t		b_offset;

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL),
			 * or even swizzled back from swapper_space to tmpfs
			 * file mapping. However, page->index will not change
			 * because we have a reference on the page.
			 *
			 * Searching is done if the page index is out of
			 * range.  If the current offset does not reach the
			 * end of the specified search range, there must be
			 * a hole in between.
			 */
			if (page->index > end) {
				if (type == HOLE_OFF && lastoff < endoff) {
					*offset = lastoff;
					found = true;
				}
				goto out;
			}

			lock_page(page);
			/*
			 * Page truncated or invalidated
			 * (page->mapping == NULL).  We can freely skip it
			 * and proceed to check the next page.
			 */
			if (unlikely(page->mapping != inode->i_mapping)) {
				unlock_page(page);
				continue;
			}

			if (!page_has_buffers(page)) {
				unlock_page(page);
				continue;
			}

			found = xfs_lookup_buffer_offset(page, &b_offset, type);
			if (found) {
				/*
				 * The found offset may be less than the
				 * start of the search range if this is the
				 * first pass through the loop.
				 */
				*offset = max_t(loff_t, startoff, b_offset);
				unlock_page(page);
				goto out;
			}

			/*
			 * We were either searching for data and found none,
			 * or searching for a hole and found a data buffer.
			 * In either case, the next page probably contains
			 * what we want, so update the last offset to point
			 * at it.
			 */
			lastoff = page_offset(page) + PAGE_SIZE;
			unlock_page(page);
		}

		/*
		 * Fewer pages were returned than we wanted, so the search
		 * is done.  Nothing was found if we were searching for data,
		 * but for a hole search there is a hole behind the last
		 * offset.
		 */
		if (nr_pages < want) {
			if (type == HOLE_OFF) {
				*offset = lastoff;
				found = true;
			}
			break;
		}

		index = pvec.pages[i - 1]->index + 1;
		pagevec_release(&pvec);
	} while (index <= end);

out:
	pagevec_release(&pvec);
	return found;
}

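/*
 * Implement SEEK_HOLE and SEEK_DATA by walking the extent map from the
 * start offset towards EOF. Unwritten extents may still carry dirty data
 * in the page cache, so for those xfs_find_get_desired_pgoff() is consulted
 * to decide whether a given offset counts as data or as a hole.
 */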
STATIC loff_t
xfs_seek_hole_data(
	struct file		*file,
	loff_t			start,
	int			whence)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	loff_t			uninitialized_var(offset);
	xfs_fsize_t		isize;
	xfs_fileoff_t		fsbno;
	xfs_filblks_t		end;
	uint			lock;
	int			error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	lock = xfs_ilock_data_map_shared(ip);

	isize = i_size_read(inode);
	if (start >= isize) {
		error = -ENXIO;
		goto out_unlock;
	}

	/*
	 * Try to read extents from the first block indicated
	 * by fsbno to the end block of the file.
	 */
	fsbno = XFS_B_TO_FSBT(mp, start);
	end = XFS_B_TO_FSB(mp, isize);

	for (;;) {
		struct xfs_bmbt_irec	map[2];
		int			nmap = 2;
		unsigned int		i;

		error = xfs_bmapi_read(ip, fsbno, end - fsbno, map, &nmap,
				       XFS_BMAPI_ENTIRE);
		if (error)
			goto out_unlock;

		/* No extents at given offset, must be beyond EOF */
		if (nmap == 0) {
			error = -ENXIO;
			goto out_unlock;
		}

		for (i = 0; i < nmap; i++) {
			offset = max_t(loff_t, start,
				       XFS_FSB_TO_B(mp, map[i].br_startoff));

			/* Landed in the hole we wanted? */
			if (whence == SEEK_HOLE &&
			    map[i].br_startblock == HOLESTARTBLOCK)
				goto out;

			/* Landed in the data extent we wanted? */
			if (whence == SEEK_DATA &&
			    (map[i].br_startblock == DELAYSTARTBLOCK ||
			     (map[i].br_state == XFS_EXT_NORM &&
			      !isnullstartblock(map[i].br_startblock))))
				goto out;

			/*
			 * Landed in an unwritten extent; try to search
			 * for a hole or data in the page cache.
			 */
			if (map[i].br_state == XFS_EXT_UNWRITTEN) {
				if (xfs_find_get_desired_pgoff(inode, &map[i],
				      whence == SEEK_HOLE ? HOLE_OFF : DATA_OFF,
				      &offset))
					goto out;
			}
		}

		/*
		 * We only received one extent out of the two requested. This
		 * means we've hit EOF and didn't find what we are looking for.
		 */
		if (nmap == 1) {
			/*
			 * If we were looking for a hole, set offset to
			 * the end of the file (i.e., there is an implicit
			 * hole at the end of any file).
			 */
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			/*
			 * If we were looking for data, it's nowhere to be
			 * found.
			 */
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}

		ASSERT(i > 1);

		/*
		 * Nothing was found; proceed to the next round of search
		 * if the next reading offset is not at or beyond EOF.
		 */
		fsbno = map[i - 1].br_startoff + map[i - 1].br_blockcount;
		start = XFS_FSB_TO_B(mp, fsbno);
		if (start >= isize) {
			if (whence == SEEK_HOLE) {
				offset = isize;
				break;
			}
			ASSERT(whence == SEEK_DATA);
			error = -ENXIO;
			goto out_unlock;
		}
	}

out:
	/*
	 * If at this point we have found the hole we wanted, the returned
	 * offset may be bigger than the file size as it may be aligned to a
	 * page boundary for unwritten extents.  We need to deal with this
	 * situation in particular.
	 */
	if (whence == SEEK_HOLE)
		offset = min_t(loff_t, offset, isize);
	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out_unlock:
	xfs_iunlock(ip, lock);

	if (error)
		return error;
	return offset;
}

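/*
 * SEEK_SET, SEEK_CUR and SEEK_END are handled by the generic helper;
 * SEEK_HOLE and SEEK_DATA use the extent walk in xfs_seek_hole_data().
 */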
STATIC loff_t
xfs_file_llseek(
	struct file	*file,
	loff_t		offset,
	int		whence)
{
	switch (whence) {
	case SEEK_END:
	case SEEK_CUR:
	case SEEK_SET:
		return generic_file_llseek(file, offset, whence);
	case SEEK_HOLE:
	case SEEK_DATA:
		return xfs_seek_hole_data(file, offset, whence);
	default:
		return -EINVAL;
	}
}

const struct file_operations xfs_file_operations = {
	.llseek		= xfs_file_llseek,
	.read		= new_sync_read,
	.write		= new_sync_write,
	.read_iter	= xfs_file_read_iter,
	.write_iter	= xfs_file_write_iter,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.iterate	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_dir_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};