/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 */

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <linux/fs.h>
#include <sys/file.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_project.h>
#if defined(HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS) || \
	defined(HAVE_VFS_FILEMAP_DIRTY_FOLIO)
#include <linux/pagemap.h>
#endif
#ifdef HAVE_FILE_FADVISE
#include <linux/fadvise.h>
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
#include <linux/writeback.h>
#endif

/*
 * When using fallocate(2) to preallocate space, inflate the requested
 * capacity check by 10% to account for the required metadata blocks.
 */
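/*
 * For example, with the default value of 110, a request to preallocate
 * 1 GiB must find roughly 1.1 GiB of available capacity before the
 * sparse file is created (see the check in zpl_fallocate_common()).
 */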
static unsigned int zfs_fallocate_reserve_percent = 110;

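/*
 * Open hook for files: perform the generic VFS open checks, then hand
 * the open off to the common ZFS code. Note that zfs_open() returns a
 * positive errno which is negated for the VFS.
 */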
static int
zpl_open(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = generic_file_open(ip, filp);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

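/*
 * Release hook for files: if a deferred atime update is pending, mark
 * the inode dirty so it is written back, then hand the close off to
 * the common ZFS code.
 */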
static int
zpl_release(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	if (ITOZ(ip)->z_atime_dirty)
		zfs_mark_inode_dirty(ip);

	crhold(cr);
	error = -zfs_close(ip, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

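/*
 * Directory iteration hook backing readdir(2)/getdents(2); directory
 * entries are emitted into the passed context by zfs_readdir().
 */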
static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_readdir(file_inode(filp), ctx, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	zpl_dir_context_t ctx =
	    ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
	int error;

	error = zpl_iterate(filp, &ctx);
	filp->f_pos = ctx.pos;

	return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */

#if defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
 * Linux 2.6.35 - 3.0 API,
 * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
 * redundant. The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 */
static int
zpl_fsync(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(ITOZ(inode), datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif

#elif defined(HAVE_FSYNC_RANGE)
/*
 * Linux 3.1 API,
 * As of 3.1 the responsibility to call filemap_write_and_wait_range() has
 * been pushed down into the .fsync() vfs hook. Additionally, the i_mutex
 * lock is no longer held by the caller. ZFS does not require the lock to
 * be held, so we do not acquire it.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	znode_t *zp = ITOZ(inode);
	zfsvfs_t *zfsvfs = ITOZSB(inode);
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	/*
	 * The variables z_sync_writes_cnt and z_async_writes_cnt work in
	 * tandem so that sync writes can detect if there are any non-sync
	 * writes going on and vice-versa. The "vice-versa" part to this logic
	 * is located in zfs_putpage() where non-sync writes check if there are
	 * any ongoing sync writes. If any sync and non-sync writes overlap,
	 * we do a commit to complete the non-sync writes since the latter can
	 * potentially take several seconds to complete and thus block sync
	 * writes in the upcoming call to filemap_write_and_wait_range().
	 */
	atomic_inc_32(&zp->z_sync_writes_cnt);
	/*
	 * If the following check does not detect an overlapping non-sync write
	 * (say because it's just about to start), then it is guaranteed that
	 * the non-sync write will detect this sync write. This is because we
	 * always increment z_sync_writes_cnt / z_async_writes_cnt before doing
	 * the check on z_async_writes_cnt / z_sync_writes_cnt here and in
	 * zfs_putpage() respectively.
	 */
	if (atomic_load_32(&zp->z_async_writes_cnt) > 0) {
		ZPL_ENTER(zfsvfs);
		zil_commit(zfsvfs->z_log, zp->z_id);
		ZPL_EXIT(zfsvfs);
	}

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);

	/*
	 * The sync write is not complete yet but we decrement
	 * z_sync_writes_cnt since zfs_fsync() increments and decrements
	 * it internally. If a non-sync write starts just after the decrement
	 * operation but before we call zfs_fsync(), it may not detect this
	 * overlapping sync write but it does not matter since we have already
	 * gone past filemap_write_and_wait_range() and we won't block due to
	 * the non-sync write.
	 */
	atomic_dec_32(&zp->z_sync_writes_cnt);

	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(zp, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif

#else
#error "Unsupported fops->fsync() implementation"
#endif

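/*
 * Map the I/O flags carried by a kiocb onto the matching O_* flags
 * expected by the common ZFS read/write code. Each IOCB_* flag is
 * tested conditionally since older kernels may not define them all.
 */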
static inline int
zfs_io_flags(struct kiocb *kiocb)
{
	int flags = 0;

#if defined(IOCB_DSYNC)
	if (kiocb->ki_flags & IOCB_DSYNC)
		flags |= O_DSYNC;
#endif
#if defined(IOCB_SYNC)
	if (kiocb->ki_flags & IOCB_SYNC)
		flags |= O_SYNC;
#endif
#if defined(IOCB_APPEND)
	if (kiocb->ki_flags & IOCB_APPEND)
		flags |= O_APPEND;
#endif
#if defined(IOCB_DIRECT)
	if (kiocb->ki_flags & IOCB_DIRECT)
		flags |= O_DIRECT;
#endif
	return (flags);
}

/*
 * If relatime is enabled, call file_accessed() only if
 * zfs_relatime_need_update() is true. This is needed since datasets
 * with an inherited "relatime" property aren't necessarily mounted with
 * the MNT_RELATIME flag (e.g. after `zfs set relatime=...`), which is
 * what the VFS relatime test in relatime_need_update() is based on.
 */
static inline void
zpl_file_accessed(struct file *filp)
{
	struct inode *ip = filp->f_mapping->host;

	if (!IS_NOATIME(ip) && ITOZSB(ip)->z_relatime) {
		if (zfs_relatime_need_update(ip))
			file_accessed(filp);
	} else {
		file_accessed(filp);
	}
}

#if defined(HAVE_VFS_RW_ITERATE)

/*
 * When HAVE_VFS_IOV_ITER is defined the iov_iter structure supports
 * iovecs, kvecs, bvecs and pipes, and all the interfaces required to
 * manipulate the iov_iter are available. In that case the full iov_iter
 * can be attached to the uio and correctly handled in the lower layers.
 * Otherwise, for older kernels, extract the iovec and pass it instead.
 */
static void
zpl_uio_init(zfs_uio_t *uio, struct kiocb *kiocb, struct iov_iter *to,
    loff_t pos, ssize_t count, size_t skip)
{
#if defined(HAVE_VFS_IOV_ITER)
	zfs_uio_iov_iter_init(uio, to, pos, count, skip);
#else
#ifdef HAVE_IOV_ITER_TYPE
	zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
	    iov_iter_type(to) & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
	    count, skip);
#else
	zfs_uio_iovec_init(uio, to->iov, to->nr_segs, pos,
	    to->type & ITER_KVEC ? UIO_SYSSPACE : UIO_USERSPACE,
	    count, skip);
#endif
#endif
}

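/*
 * The .read_iter() hook: wrap the iov_iter in a zfs_uio_t, perform the
 * read via zfs_read(), advance the file position by the number of
 * bytes transferred, and update the atime as required.
 */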
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	ssize_t count = iov_iter_count(to);
	zfs_uio_t uio;

	zpl_uio_init(&uio, kiocb, to, kiocb->ki_pos, count, 0);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t read = count - uio.uio_resid;
	kiocb->ki_pos += read;

	zpl_file_accessed(filp);

	return (read);
}

static inline ssize_t
zpl_generic_write_checks(struct kiocb *kiocb, struct iov_iter *from,
    size_t *countp)
{
#ifdef HAVE_GENERIC_WRITE_CHECKS_KIOCB
	ssize_t ret = generic_write_checks(kiocb, from);
	if (ret <= 0)
		return (ret);

	*countp = ret;
#else
	struct file *file = kiocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *ip = mapping->host;
	int isblk = S_ISBLK(ip->i_mode);

	*countp = iov_iter_count(from);
	ssize_t ret = generic_write_checks(file, &kiocb->ki_pos, countp, isblk);
	if (ret)
		return (ret);
#endif

	return (0);
}

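/*
 * The .write_iter() hook: validate the request with the generic write
 * checks, wrap the iov_iter in a zfs_uio_t, and perform the write via
 * zfs_write(), advancing the file position by the bytes written.
 */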
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	struct inode *ip = filp->f_mapping->host;
	zfs_uio_t uio;
	size_t count = 0;
	ssize_t ret;

	ret = zpl_generic_write_checks(kiocb, from, &count);
	if (ret)
		return (ret);

	zpl_uio_init(&uio, kiocb, from, kiocb->ki_pos, count, from->iov_offset);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_write(ITOZ(ip), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t wrote = count - uio.uio_resid;
	kiocb->ki_pos += wrote;

	return (wrote);
}

#else /* !HAVE_VFS_RW_ITERATE */

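/*
 * Legacy .aio_read()/.aio_write() paths used by kernels which predate
 * the ->read_iter()/->write_iter() interfaces. The iovec is validated
 * with the generic segment checks and then wrapped in a zfs_uio_t.
 */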
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iov,
    unsigned long nr_segs, loff_t pos)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	size_t count;
	ssize_t ret;

	ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (ret)
		return (ret);

	zfs_uio_t uio;
	zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
	    count, 0);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_read(ITOZ(filp->f_mapping->host), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t read = count - uio.uio_resid;
	kiocb->ki_pos += read;

	zpl_file_accessed(filp);

	return (read);
}

static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iov,
    unsigned long nr_segs, loff_t pos)
{
	cred_t *cr = CRED();
	fstrans_cookie_t cookie;
	struct file *filp = kiocb->ki_filp;
	struct inode *ip = filp->f_mapping->host;
	size_t count;
	ssize_t ret;

	ret = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (ret)
		return (ret);

	ret = generic_write_checks(filp, &pos, &count, S_ISBLK(ip->i_mode));
	if (ret)
		return (ret);

	kiocb->ki_pos = pos;

	zfs_uio_t uio;
	zfs_uio_iovec_init(&uio, iov, nr_segs, kiocb->ki_pos, UIO_USERSPACE,
	    count, 0);

	crhold(cr);
	cookie = spl_fstrans_mark();

	int error = -zfs_write(ITOZ(ip), &uio,
	    filp->f_flags | zfs_io_flags(kiocb), cr);

	spl_fstrans_unmark(cookie);
	crfree(cr);

	if (error < 0)
		return (error);

	ssize_t wrote = count - uio.uio_resid;
	kiocb->ki_pos += wrote;

	return (wrote);
}
#endif /* HAVE_VFS_RW_ITERATE */

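/*
 * Direct I/O requests are serviced by the regular read/write paths
 * above, which already bypass the page cache in favor of the ARC. The
 * several zpl_direct_IO() variants below exist only to adapt to the
 * differing .direct_IO prototypes exposed by supported kernels.
 */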
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
	if (rw == WRITE)
		return (zpl_iter_write(kiocb, iter));
	else
		return (zpl_iter_read(kiocb, iter));
}
#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif

#else /* HAVE_VFS_RW_ITERATE */

#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iov,
    loff_t pos, unsigned long nr_segs)
{
	if (rw == WRITE)
		return (zpl_aio_write(kiocb, iov, nr_segs, pos));
	else
		return (zpl_aio_read(kiocb, iov, nr_segs, pos));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	const struct iovec *iovp = iov_iter_iovec(iter);
	unsigned long nr_segs = iter->nr_segs;

	ASSERT3S(pos, ==, kiocb->ki_pos);
	if (rw == WRITE)
		return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
	else
		return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif

#endif /* HAVE_VFS_RW_ITERATE */

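/*
 * The .llseek hook: SEEK_HOLE and SEEK_DATA requests are resolved by
 * zfs_holey() when supported by the kernel; everything else falls
 * through to generic_file_llseek().
 */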
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	fstrans_cookie_t cookie;

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *ip = filp->f_mapping->host;
		loff_t maxbytes = ip->i_sb->s_maxbytes;
		loff_t error;

		spl_inode_lock_shared(ip);
		cookie = spl_fstrans_mark();
		error = -zfs_holey(ITOZ(ip), whence, &offset);
		spl_fstrans_unmark(cookie);
		if (error == 0)
			error = lseek_execute(filp, ip, offset, maxbytes);
		spl_inode_unlock_shared(ip);

		return (error);
	}
#endif /* SEEK_HOLE && SEEK_DATA */

	return (generic_file_llseek(filp, offset, whence));
}

/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC. This has been shown to work
 * well for the common read(2)/write(2) case. However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache. To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache. The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated. For a read(2) data will be read first from the page
 * cache then the ARC if needed. Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region. These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage(). This will occur due to either a sync or the usual
 * page aging behavior. Note that because a read(2) of a mmap'ed file
 * will always check the page cache first, correct data will still be
 * returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks. The most obvious of which is that it increases
 * the required memory footprint when accessing mmap'ed files. It
 * also adds additional complexity to the code keeping both caches
 * synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly on to the ARC buffers. The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index. The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both. It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	znode_t *zp = ITOZ(ip);
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	spl_fstrans_unmark(cookie);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = B_TRUE;
	mutex_exit(&zp->z_lock);

	return (error);
}

/*
 * Populate a page with data for the Linux page cache. This function is
 * only used to support mmap(2). There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 */
static inline int
zpl_readpage_common(struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	cookie = spl_fstrans_mark();
	error = -zfs_getpage(ip, pl, 1);
	spl_fstrans_unmark(cookie);

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}

#ifdef HAVE_VFS_READ_FOLIO
static int
zpl_read_folio(struct file *filp, struct folio *folio)
{
	return (zpl_readpage_common(&folio->page));
}
#else
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	return (zpl_readpage_common(pp));
}
#endif

static int
zpl_readpage_filler(void *data, struct page *pp)
{
	return (zpl_readpage_common(pp));
}

/*
 * Populate a set of pages with data for the Linux page cache. This
 * function will only be called for read ahead and never for demand
 * paging. For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
#ifdef HAVE_VFS_READPAGES
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
	return (read_cache_pages(mapping, pages, zpl_readpage_filler, NULL));
}
#else
static void
zpl_readahead(struct readahead_control *ractl)
{
	struct page *page;

	while ((page = readahead_page(ractl)) != NULL) {
		int ret;

		ret = zpl_readpage_filler(NULL, page);
		put_page(page);
		if (ret)
			break;
	}
}
#endif

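/*
 * Write a single dirty page back to the ARC via zfs_putpage(). The
 * opaque data argument carries a boolean indicating whether the
 * caller is a synchronous writeback (see zpl_writepages() below).
 */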
static int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
	boolean_t *for_sync = data;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));

	cookie = spl_fstrans_mark();
	(void) zfs_putpage(pp->mapping->host, pp, wbc, *for_sync);
	spl_fstrans_unmark(cookie);

	return (0);
}

static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZPL_ENTER(zfsvfs);
	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZPL_EXIT(zfsvfs);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance. Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	boolean_t for_sync = (sync_mode == WB_SYNC_ALL);
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, &for_sync);
	if (sync_mode != wbc->sync_mode) {
		ZPL_ENTER(zfsvfs);
		ZPL_VERIFY_ZP(zp);
		if (zfsvfs->z_log != NULL)
			zil_commit(zfsvfs->z_log, zp->z_id);
		ZPL_EXIT(zfsvfs);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details). That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage,
		    &for_sync);
	}
	return (result);
}

/*
 * Write out dirty pages to the ARC. This function is only required to
 * support mmap(2). Mapped pages may be dirtied by memory operations
 * which never call .write(). These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;

	boolean_t for_sync = (wbc->sync_mode == WB_SYNC_ALL);

	return (zpl_putpage(pp, wbc, &for_sync));
}

/*
 * The flag combination which matches the behavior of zfs_space() is
 * FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
 * flag was introduced in the 2.6.38 kernel.
 *
 * The original mode=0 (allocate space) behavior can be reasonably emulated
 * by checking if enough space exists and creating a sparse file, as real
 * persistent space reservation is not possible due to COW, snapshots, etc.
 */
static long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
	cred_t *cr = CRED();
	loff_t olen;
	fstrans_cookie_t cookie;
	int error = 0;

	int test_mode = FALLOC_FL_PUNCH_HOLE;
#ifdef HAVE_FALLOC_FL_ZERO_RANGE
	test_mode |= FALLOC_FL_ZERO_RANGE;
#endif

	if ((mode & ~(FALLOC_FL_KEEP_SIZE | test_mode)) != 0)
		return (-EOPNOTSUPP);

	if (offset < 0 || len <= 0)
		return (-EINVAL);

	spl_inode_lock(ip);
	olen = i_size_read(ip);

	crhold(cr);
	cookie = spl_fstrans_mark();
	if (mode & (test_mode)) {
		flock64_t bf;

		if (mode & FALLOC_FL_KEEP_SIZE) {
			if (offset > olen)
				goto out_unmark;

			if (offset + len > olen)
				len = olen - offset;
		}
		bf.l_type = F_WRLCK;
		bf.l_whence = SEEK_SET;
		bf.l_start = offset;
		bf.l_len = len;
		bf.l_pid = 0;

		error = -zfs_space(ITOZ(ip), F_FREESP, &bf, O_RDWR, offset, cr);
	} else if ((mode & ~FALLOC_FL_KEEP_SIZE) == 0) {
		unsigned int percent = zfs_fallocate_reserve_percent;
		struct kstatfs statfs;

		/* Legacy mode, disable fallocate compatibility. */
		if (percent == 0) {
			error = -EOPNOTSUPP;
			goto out_unmark;
		}

		/*
		 * Use zfs_statvfs() instead of dmu_objset_space() since it
		 * also checks project quota limits, which are relevant here.
		 */
		error = zfs_statvfs(ip, &statfs);
		if (error)
			goto out_unmark;

		/*
		 * Shrink available space a bit to account for overhead/races.
		 * We know the product previously fit into availbytes from
		 * dmu_objset_space(), so the smaller product will also fit.
		 */
		if (len > statfs.f_bavail * (statfs.f_bsize * 100 / percent)) {
			error = -ENOSPC;
			goto out_unmark;
		}
		if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > olen)
			error = zfs_freesp(ITOZ(ip), offset + len, 0, 0, FALSE);
	}
out_unmark:
	spl_fstrans_unmark(cookie);
	spl_inode_unlock(ip);

	crfree(cr);

	return (error);
}

static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	return (zpl_fallocate_common(file_inode(filp),
	    mode, offset, len));
}

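/* Return the inode generation number, backing FS_IOC_GETVERSION. */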
static int
zpl_ioctl_getversion(struct file *filp, void __user *arg)
{
	uint32_t generation = file_inode(filp)->i_generation;

	return (copy_to_user(arg, &generation, sizeof (generation)));
}

#ifdef HAVE_FILE_FADVISE
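/*
 * The .fadvise hook, backing posix_fadvise(2). WILLNEED and SEQUENTIAL
 * hints are translated into a dmu_prefetch() of the requested range
 * (and also passed to generic_fadvise() for any cached pages); the
 * remaining advice values are accepted but currently ignored.
 */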
static int
zpl_fadvise(struct file *filp, loff_t offset, loff_t len, int advice)
{
	struct inode *ip = file_inode(filp);
	znode_t *zp = ITOZ(ip);
	zfsvfs_t *zfsvfs = ITOZSB(ip);
	objset_t *os = zfsvfs->z_os;
	int error = 0;

	if (S_ISFIFO(ip->i_mode))
		return (-ESPIPE);

	if (offset < 0 || len < 0)
		return (-EINVAL);

	ZFS_ENTER(zfsvfs);
	ZFS_VERIFY_ZP(zp);

	switch (advice) {
	case POSIX_FADV_SEQUENTIAL:
	case POSIX_FADV_WILLNEED:
#ifdef HAVE_GENERIC_FADVISE
		if (zn_has_cached_data(zp))
			error = generic_fadvise(filp, offset, len, advice);
#endif
		/*
		 * Pass on the caller's size directly, but note that
		 * dmu_prefetch_max will effectively cap it. If there
		 * really is a larger sequential access pattern, perhaps
		 * dmu_zfetch will detect it.
		 */
		if (len == 0)
			len = i_size_read(ip) - offset;

		dmu_prefetch(os, zp->z_id, 0, offset, len,
		    ZIO_PRIORITY_ASYNC_READ);
		break;
	case POSIX_FADV_NORMAL:
	case POSIX_FADV_RANDOM:
	case POSIX_FADV_DONTNEED:
	case POSIX_FADV_NOREUSE:
		/* ignored for now */
		break;
	default:
		error = -EINVAL;
		break;
	}

	ZFS_EXIT(zfsvfs);

	return (error);
}
#endif /* HAVE_FILE_FADVISE */

#define	ZFS_FL_USER_VISIBLE	(FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define	ZFS_FL_USER_MODIFIABLE	(FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)

static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	uint32_t ioctl_flags = 0;

	if (zfs_flags & ZFS_IMMUTABLE)
		ioctl_flags |= FS_IMMUTABLE_FL;

	if (zfs_flags & ZFS_APPENDONLY)
		ioctl_flags |= FS_APPEND_FL;

	if (zfs_flags & ZFS_NODUMP)
		ioctl_flags |= FS_NODUMP_FL;

	if (zfs_flags & ZFS_PROJINHERIT)
		ioctl_flags |= ZFS_PROJINHERIT_FL;

	return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}

/*
 * Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
	uint32_t flags;
	int err;

	flags = __zpl_ioctl_getflags(file_inode(filp));
	err = copy_to_user(arg, &flags, sizeof (flags));

	return (err);
}

/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag. This is ugly, but the requirement that we do this is a consequence of
 * how the Linux file attribute interface was designed. Another consequence is
 * that concurrent modification of files suffers from a TOCTOU race. Neither
 * are things we can fix without modifying the kernel-userland interface, which
 * is outside of our jurisdiction.
 */

#define	fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))

static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	xoptattr_t *xoap;

	if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
	    ZFS_PROJINHERIT_FL))
		return (-EOPNOTSUPP);

	if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
		return (-EACCES);

	if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EPERM);

	if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
		return (-EACCES);

	xva_init(xva);
	xoap = xva_getxoptattr(xva);

#define	FLAG_CHANGE(iflag, zflag, xflag, xfield)	do {		\
	if (((ioctl_flags & (iflag)) && !(zfs_flags & (zflag))) ||	\
	    ((zfs_flags & (zflag)) && !(ioctl_flags & (iflag)))) {	\
		XVA_SET_REQ(xva, (xflag));				\
		(xfield) = ((ioctl_flags & (iflag)) != 0);		\
	}								\
} while (0)

	FLAG_CHANGE(FS_IMMUTABLE_FL, ZFS_IMMUTABLE, XAT_IMMUTABLE,
	    xoap->xoa_immutable);
	FLAG_CHANGE(FS_APPEND_FL, ZFS_APPENDONLY, XAT_APPENDONLY,
	    xoap->xoa_appendonly);
	FLAG_CHANGE(FS_NODUMP_FL, ZFS_NODUMP, XAT_NODUMP,
	    xoap->xoa_nodump);
	FLAG_CHANGE(ZFS_PROJINHERIT_FL, ZFS_PROJINHERIT, XAT_PROJINHERIT,
	    xoap->xoa_projinherit);

#undef	FLAG_CHANGE

	return (0);
}

static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint32_t flags;
	cred_t *cr = CRED();
	xvattr_t xva;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&flags, arg, sizeof (flags)))
		return (-EFAULT);

	err = __zpl_ioctl_setflags(ip, flags, &xva);
	if (err)
		return (err);

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}

static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
	zfsxattr_t fsx = { 0 };
	struct inode *ip = file_inode(filp);
	int err;

	fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
	fsx.fsx_projid = ITOZ(ip)->z_projid;
	err = copy_to_user(arg, &fsx, sizeof (fsx));

	return (err);
}

static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	zfsxattr_t fsx;
	cred_t *cr = CRED();
	xvattr_t xva;
	xoptattr_t *xoap;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&fsx, arg, sizeof (fsx)))
		return (-EFAULT);

	if (!zpl_is_valid_projid(fsx.fsx_projid))
		return (-EINVAL);

	err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
	if (err)
		return (err);

	xoap = xva_getxoptattr(&xva);
	XVA_SET_REQ(&xva, XAT_PROJID);
	xoap->xoa_projid = fsx.fsx_projid;

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}

/*
 * Expose Additional File Level Attributes of ZFS.
 */
static int
zpl_ioctl_getdosflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint64_t dosflags = ITOZ(ip)->z_pflags;
	dosflags &= ZFS_DOS_FL_USER_VISIBLE;
	int err = copy_to_user(arg, &dosflags, sizeof (dosflags));

	return (err);
}

static int
__zpl_ioctl_setdosflags(struct inode *ip, uint64_t ioctl_flags, xvattr_t *xva)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	xoptattr_t *xoap;

	if (ioctl_flags & (~ZFS_DOS_FL_USER_VISIBLE))
		return (-EOPNOTSUPP);

	if ((fchange(ioctl_flags, zfs_flags, ZFS_IMMUTABLE, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, ZFS_APPENDONLY, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EPERM);

	if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
		return (-EACCES);

	xva_init(xva);
	xoap = xva_getxoptattr(xva);

#define	FLAG_CHANGE(iflag, xflag, xfield)	do {			\
	if (((ioctl_flags & (iflag)) && !(zfs_flags & (iflag))) ||	\
	    ((zfs_flags & (iflag)) && !(ioctl_flags & (iflag)))) {	\
		XVA_SET_REQ(xva, (xflag));				\
		(xfield) = ((ioctl_flags & (iflag)) != 0);		\
	}								\
} while (0)

	FLAG_CHANGE(ZFS_IMMUTABLE, XAT_IMMUTABLE, xoap->xoa_immutable);
	FLAG_CHANGE(ZFS_APPENDONLY, XAT_APPENDONLY, xoap->xoa_appendonly);
	FLAG_CHANGE(ZFS_NODUMP, XAT_NODUMP, xoap->xoa_nodump);
	FLAG_CHANGE(ZFS_READONLY, XAT_READONLY, xoap->xoa_readonly);
	FLAG_CHANGE(ZFS_HIDDEN, XAT_HIDDEN, xoap->xoa_hidden);
	FLAG_CHANGE(ZFS_SYSTEM, XAT_SYSTEM, xoap->xoa_system);
	FLAG_CHANGE(ZFS_ARCHIVE, XAT_ARCHIVE, xoap->xoa_archive);
	FLAG_CHANGE(ZFS_NOUNLINK, XAT_NOUNLINK, xoap->xoa_nounlink);
	FLAG_CHANGE(ZFS_REPARSE, XAT_REPARSE, xoap->xoa_reparse);
	FLAG_CHANGE(ZFS_OFFLINE, XAT_OFFLINE, xoap->xoa_offline);
	FLAG_CHANGE(ZFS_SPARSE, XAT_SPARSE, xoap->xoa_sparse);

#undef	FLAG_CHANGE

	return (0);
}

/*
 * Set Additional File Level Attributes of ZFS.
 */
static int
zpl_ioctl_setdosflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint64_t dosflags;
	cred_t *cr = CRED();
	xvattr_t xva;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&dosflags, arg, sizeof (dosflags)))
		return (-EFAULT);

	err = __zpl_ioctl_setdosflags(ip, dosflags, &xva);
	if (err)
		return (err);

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ITOZ(ip), (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}

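/*
 * Dispatch the file ioctls implemented above; any unrecognized command
 * is rejected with ENOTTY per convention.
 */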
static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETVERSION:
		return (zpl_ioctl_getversion(filp, (void *)arg));
	case FS_IOC_GETFLAGS:
		return (zpl_ioctl_getflags(filp, (void *)arg));
	case FS_IOC_SETFLAGS:
		return (zpl_ioctl_setflags(filp, (void *)arg));
	case ZFS_IOC_FSGETXATTR:
		return (zpl_ioctl_getxattr(filp, (void *)arg));
	case ZFS_IOC_FSSETXATTR:
		return (zpl_ioctl_setxattr(filp, (void *)arg));
	case ZFS_IOC_GETDOSFLAGS:
		return (zpl_ioctl_getdosflags(filp, (void *)arg));
	case ZFS_IOC_SETDOSFLAGS:
		return (zpl_ioctl_setdosflags(filp, (void *)arg));
	default:
		return (-ENOTTY);
	}
}

#ifdef CONFIG_COMPAT
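/*
 * 32-bit compat ioctl entry point: translate the FS_IOC32_* commands
 * to their native equivalents and forward to zpl_ioctl().
 */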
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC32_GETVERSION:
		cmd = FS_IOC_GETVERSION;
		break;
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	default:
		return (-ENOTTY);
	}
	return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */

const struct address_space_operations zpl_address_space_operations = {
#ifdef HAVE_VFS_READPAGES
	.readpages	= zpl_readpages,
#else
	.readahead	= zpl_readahead,
#endif
#ifdef HAVE_VFS_READ_FOLIO
	.read_folio	= zpl_read_folio,
#else
	.readpage	= zpl_readpage,
#endif
	.writepage	= zpl_writepage,
	.writepages	= zpl_writepages,
	.direct_IO	= zpl_direct_IO,
#ifdef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS
	.set_page_dirty	= __set_page_dirty_nobuffers,
#endif
#ifdef HAVE_VFS_FILEMAP_DIRTY_FOLIO
	.dirty_folio	= filemap_dirty_folio,
#endif
};

const struct file_operations zpl_file_operations = {
	.open		= zpl_open,
	.release	= zpl_release,
	.llseek		= zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
	.read		= new_sync_read,
	.write		= new_sync_write,
#endif
	.read_iter	= zpl_iter_read,
	.write_iter	= zpl_iter_write,
#ifdef HAVE_VFS_IOV_ITER
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
#endif
#else
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= zpl_aio_read,
	.aio_write	= zpl_aio_write,
#endif
	.mmap		= zpl_mmap,
	.fsync		= zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
	.aio_fsync	= zpl_aio_fsync,
#endif
	.fallocate	= zpl_fallocate,
#ifdef HAVE_FILE_FADVISE
	.fadvise	= zpl_fadvise,
#endif
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};

const struct file_operations zpl_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
	.iterate_shared	= zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
	.iterate	= zpl_iterate,
#else
	.readdir	= zpl_readdir,
#endif
	.fsync		= zpl_fsync,
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};

/* CSTYLED */
module_param(zfs_fallocate_reserve_percent, uint, 0644);
MODULE_PARM_DESC(zfs_fallocate_reserve_percent,
	"Percentage of length to use for the available capacity check");