/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 */


#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <sys/file.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_project.h>


static int
zpl_open(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = generic_file_open(ip, filp);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_release(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	if (ITOZ(ip)->z_atime_dirty)
		zfs_mark_inode_dirty(ip);

	crhold(cr);
	error = -zfs_close(ip, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_readdir(file_inode(filp), ctx, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	zpl_dir_context_t ctx =
	    ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
	int error;

	error = zpl_iterate(filp, &ctx);
	filp->f_pos = ctx.pos;

	return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */

#if defined(HAVE_FSYNC_WITH_DENTRY)
/*
 * Linux 2.6.x - 2.6.34 API,
 * Through 2.6.34 the nfsd kernel server would pass a NULL 'file struct *'
 * to the fops->fsync() hook. For this reason, we must be careful not to
 * use filp unconditionally.
 */
static int
zpl_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(dentry->d_inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	struct file *filp = kiocb->ki_filp;
	return (zpl_fsync(filp, file_dentry(filp), datasync));
}
#endif

#elif defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
 * Linux 2.6.35 - 3.0 API,
 * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
 * redundant. The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 */
static int
zpl_fsync(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif

#elif defined(HAVE_FSYNC_RANGE)
/*
 * Linux 3.1 - 3.x API,
 * As of 3.1 the responsibility to call filemap_write_and_wait_range() has
 * been pushed down into the .fsync() vfs hook. Additionally, the i_mutex
 * lock is no longer held by the caller. For zfs we don't require the lock
 * to be held, so we don't acquire it.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif

#else
#error "Unsupported fops->fsync() implementation"
#endif

static inline int
zfs_io_flags(struct kiocb *kiocb)
{
	int flags = 0;

#if defined(IOCB_DSYNC)
	if (kiocb->ki_flags & IOCB_DSYNC)
		flags |= FDSYNC;
#endif
#if defined(IOCB_SYNC)
	if (kiocb->ki_flags & IOCB_SYNC)
		flags |= FSYNC;
#endif
#if defined(IOCB_APPEND)
	if (kiocb->ki_flags & IOCB_APPEND)
		flags |= FAPPEND;
#endif
#if defined(IOCB_DIRECT)
	if (kiocb->ki_flags & IOCB_DIRECT)
		flags |= FDIRECT;
#endif
	return (flags);
}

static ssize_t
zpl_read_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
    cred_t *cr, size_t skip)
{
	ssize_t read;
	uio_t uio;
	int error;
	fstrans_cookie_t cookie;

	uio.uio_iov = iovp;
	uio.uio_skip = skip;
	uio.uio_resid = count;
	uio.uio_iovcnt = nr_segs;
	uio.uio_loffset = *ppos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	cookie = spl_fstrans_mark();
	error = -zfs_read(ip, &uio, flags, cr);
	spl_fstrans_unmark(cookie);
	if (error < 0)
		return (error);

	read = count - uio.uio_resid;
	*ppos += read;

	return (read);
}

inline ssize_t
zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	struct iovec iov;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	return (zpl_read_common_iovec(ip, &iov, len, 1, ppos, segment,
	    flags, cr, 0));
}

static ssize_t
zpl_iter_read_common(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	ssize_t read;
	unsigned int f_flags = filp->f_flags;

	f_flags |= zfs_io_flags(kiocb);
	crhold(cr);
	read = zpl_read_common_iovec(filp->f_mapping->host, iovp, count,
	    nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip);
	crfree(cr);

	file_accessed(filp);
	return (read);
}

#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
	ssize_t ret;
	uio_seg_t seg = UIO_USERSPACE;
	if (to->type & ITER_KVEC)
		seg = UIO_SYSSPACE;
	if (to->type & ITER_BVEC)
		seg = UIO_BVEC;
	ret = zpl_iter_read_common(kiocb, to->iov, to->nr_segs,
	    iov_iter_count(to), seg, to->iov_offset);
	if (ret > 0)
		iov_iter_advance(to, ret);
	return (ret);
}
#else
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	ssize_t ret;
	size_t count;

	ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_WRITE);
	if (ret)
		return (ret);

	return (zpl_iter_read_common(kiocb, iovp, nr_segs, count,
	    UIO_USERSPACE, 0));
}
#endif /* HAVE_VFS_RW_ITERATE */

static ssize_t
zpl_write_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
    cred_t *cr, size_t skip)
{
	ssize_t wrote;
	uio_t uio;
	int error;
	fstrans_cookie_t cookie;

	if (flags & O_APPEND)
		*ppos = i_size_read(ip);

	uio.uio_iov = iovp;
	uio.uio_skip = skip;
	uio.uio_resid = count;
	uio.uio_iovcnt = nr_segs;
	uio.uio_loffset = *ppos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	cookie = spl_fstrans_mark();
	error = -zfs_write(ip, &uio, flags, cr);
	spl_fstrans_unmark(cookie);
	if (error < 0)
		return (error);

	wrote = count - uio.uio_resid;
	*ppos += wrote;

	return (wrote);
}

inline ssize_t
zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	struct iovec iov;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	return (zpl_write_common_iovec(ip, &iov, len, 1, ppos, segment,
	    flags, cr, 0));
}

static ssize_t
zpl_iter_write_common(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	ssize_t wrote;
	unsigned int f_flags = filp->f_flags;

	f_flags |= zfs_io_flags(kiocb);
	crhold(cr);
	wrote = zpl_write_common_iovec(filp->f_mapping->host, iovp, count,
	    nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip);
	crfree(cr);

	return (wrote);
}

#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
	size_t count;
	ssize_t ret;
	uio_seg_t seg = UIO_USERSPACE;

#ifndef HAVE_GENERIC_WRITE_CHECKS_KIOCB
	struct file *file = kiocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *ip = mapping->host;
	int isblk = S_ISBLK(ip->i_mode);

	count = iov_iter_count(from);
	ret = generic_write_checks(file, &kiocb->ki_pos, &count, isblk);
	if (ret)
		return (ret);
#else
	/*
	 * XXX - ideally this check should be in the same lock region with
	 * write operations, so that there's no TOCTTOU race when doing
	 * append and someone else grows the file.
	 */
	ret = generic_write_checks(kiocb, from);
	if (ret <= 0)
		return (ret);
	count = ret;
#endif

	if (from->type & ITER_KVEC)
		seg = UIO_SYSSPACE;
	if (from->type & ITER_BVEC)
		seg = UIO_BVEC;

	ret = zpl_iter_write_common(kiocb, from->iov, from->nr_segs,
	    count, seg, from->iov_offset);
	if (ret > 0)
		iov_iter_advance(from, ret);

	return (ret);
}
#else
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	struct file *file = kiocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *ip = mapping->host;
	int isblk = S_ISBLK(ip->i_mode);
	size_t count;
	ssize_t ret;

	ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_READ);
	if (ret)
		return (ret);

	ret = generic_write_checks(file, &pos, &count, isblk);
	if (ret)
		return (ret);

	return (zpl_iter_write_common(kiocb, iovp, nr_segs, count,
	    UIO_USERSPACE, 0));
}
#endif /* HAVE_VFS_RW_ITERATE */

#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
	if (rw == WRITE)
		return (zpl_iter_write(kiocb, iter));
	else
		return (zpl_iter_read(kiocb, iter));
}
#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif

#else

#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iovp,
    loff_t pos, unsigned long nr_segs)
{
	if (rw == WRITE)
		return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
	else
		return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif

#endif /* HAVE_VFS_RW_ITERATE */

static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	fstrans_cookie_t cookie;

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *ip = filp->f_mapping->host;
		loff_t maxbytes = ip->i_sb->s_maxbytes;
		loff_t error;

		spl_inode_lock_shared(ip);
		cookie = spl_fstrans_mark();
		error = -zfs_holey(ip, whence, &offset);
		spl_fstrans_unmark(cookie);
		if (error == 0)
			error = lseek_execute(filp, ip, offset, maxbytes);
		spl_inode_unlock_shared(ip);

		return (error);
	}
#endif /* SEEK_HOLE && SEEK_DATA */

	return (generic_file_llseek(filp, offset, whence));
}

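/*
 * For illustration only (not part of this module): the SEEK_DATA/SEEK_HOLE
 * branch above is what a userspace caller reaches when walking the allocated
 * regions of a sparse file with lseek(2). A minimal sketch, assuming a valid
 * open file descriptor 'fd':
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	   // first data at or after 0
 *	off_t hole = lseek(fd, data, SEEK_HOLE);   // end of that data run
 */
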
/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC. This has been shown to work
 * well for the common read(2)/write(2) case. However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache. To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache. The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated. For a read(2) data will be read first from the page
 * cache then the ARC if needed. Neither a write(2) nor a read(2) will
 * ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region. These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage(). This will occur due to either a sync or the usual
 * page aging behavior. Note that because a read(2) of a mmap'ed file
 * will always check the page cache first, correct data will still be
 * returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks. The most obvious of which is that it increases
 * the required memory footprint when accessing mmap'ed files. It
 * also adds additional complexity to the code, which must keep
 * both caches synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly onto the ARC buffers. The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index. The trick
 * would be working out the details of which subsystem is in
 * charge: the ARC, the page cache, or both. It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	znode_t *zp = ITOZ(ip);
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	spl_fstrans_unmark(cookie);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = B_TRUE;
	mutex_exit(&zp->z_lock);

	return (error);
}

/*
 * Populate a page with data for the Linux page cache. This function is
 * only used to support mmap(2). There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 *
 * Currently this function relies on zpl_read_common() and the O_DIRECT
 * flag to read in a page. This works but the more correct way is to
 * update zfs_fillpage() to be Linux friendly and use that interface.
 */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	cookie = spl_fstrans_mark();
	error = -zfs_getpage(ip, pl, 1);
	spl_fstrans_unmark(cookie);

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}

/*
 * Populate a set of pages with data for the Linux page cache. This
 * function will only be called for read ahead and never for demand
 * paging. For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
	return (read_cache_pages(mapping, pages,
	    (filler_t *)zpl_readpage, filp));
}

int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));

	cookie = spl_fstrans_mark();
	(void) zfs_putpage(mapping->host, pp, wbc);
	spl_fstrans_unmark(cookie);

	return (0);
}

static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZFS_ENTER(zfsvfs);
	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZFS_EXIT(zfsvfs);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance. Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	if (sync_mode != wbc->sync_mode) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		if (zfsvfs->z_log != NULL)
			zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details). That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	}
	return (result);
}

/*
 * Write out dirty pages to the ARC. This function is only required to
 * support mmap(2). Mapped pages may be dirtied by memory operations
 * which never call .write(). These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;

	return (zpl_putpage(pp, wbc, pp->mapping));
}

/*
 * The only flag combination which matches the behavior of zfs_space()
 * is FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
 * flag was introduced in the 2.6.38 kernel.
 */
#if defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE)
long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
	int error = -EOPNOTSUPP;

#if defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
	cred_t *cr = CRED();
	flock64_t bf;
	loff_t olen;
	fstrans_cookie_t cookie;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return (error);

	if (offset < 0 || len <= 0)
		return (-EINVAL);

	spl_inode_lock(ip);
	olen = i_size_read(ip);

	if (offset > olen) {
		spl_inode_unlock(ip);
		return (0);
	}
	if (offset + len > olen)
		len = olen - offset;
	bf.l_type = F_WRLCK;
	bf.l_whence = SEEK_SET;
	bf.l_start = offset;
	bf.l_len = len;
	bf.l_pid = 0;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
	spl_fstrans_unmark(cookie);
	spl_inode_unlock(ip);

	crfree(cr);
#endif /* defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE) */

	ASSERT3S(error, <=, 0);
	return (error);
}
#endif /* defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE) */

#ifdef HAVE_FILE_FALLOCATE
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	return zpl_fallocate_common(file_inode(filp),
	    mode, offset, len);
}
#endif /* HAVE_FILE_FALLOCATE */
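
/*
 * For illustration only: hole punching is the one mode combination the
 * handlers above support; any other mode falls through to -EOPNOTSUPP.
 * A minimal userspace sketch, assuming an open descriptor 'fd' on a
 * ZFS file:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *	    offset, length);
 */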

#define	ZFS_FL_USER_VISIBLE	(FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define	ZFS_FL_USER_MODIFIABLE	(FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)

static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	uint32_t ioctl_flags = 0;

	if (zfs_flags & ZFS_IMMUTABLE)
		ioctl_flags |= FS_IMMUTABLE_FL;

	if (zfs_flags & ZFS_APPENDONLY)
		ioctl_flags |= FS_APPEND_FL;

	if (zfs_flags & ZFS_NODUMP)
		ioctl_flags |= FS_NODUMP_FL;

	if (zfs_flags & ZFS_PROJINHERIT)
		ioctl_flags |= ZFS_PROJINHERIT_FL;

	return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}

/*
 * Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
	uint32_t flags;
	int err;

	flags = __zpl_ioctl_getflags(file_inode(filp));
	err = copy_to_user(arg, &flags, sizeof (flags));

	return (err);
}

/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag. This is ugly, but the requirement that we do this is a consequence of
 * how the Linux file attribute interface was designed. Another consequence is
 * that concurrent modification of files suffers from a TOCTOU race. Neither
 * are things we can fix without modifying the kernel-userland interface, which
 * is outside of our jurisdiction.
 */

#define	fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))
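
/*
 * For example, with requested flags f0 = 0 and current flags
 * f1 = ZFS_IMMUTABLE, fchange(f0, f1, FS_IMMUTABLE_FL, ZFS_IMMUTABLE)
 * expands to (!(0 & FS_IMMUTABLE_FL) != !(ZFS_IMMUTABLE & ZFS_IMMUTABLE)),
 * i.e. (1 != 0): the caller is asking to clear an immutable bit that is
 * currently set, so a change is reported. When both sides agree the
 * result is 0 and no CAP_LINUX_IMMUTABLE check is required (see
 * __zpl_ioctl_setflags() below).
 */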

static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	xoptattr_t *xoap;

	if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
	    ZFS_PROJINHERIT_FL))
		return (-EOPNOTSUPP);

	if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
		return (-EACCES);

	if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EACCES);

	if (!zpl_inode_owner_or_capable(ip))
		return (-EACCES);

	xva_init(xva);
	xoap = xva_getxoptattr(xva);

	XVA_SET_REQ(xva, XAT_IMMUTABLE);
	if (ioctl_flags & FS_IMMUTABLE_FL)
		xoap->xoa_immutable = B_TRUE;

	XVA_SET_REQ(xva, XAT_APPENDONLY);
	if (ioctl_flags & FS_APPEND_FL)
		xoap->xoa_appendonly = B_TRUE;

	XVA_SET_REQ(xva, XAT_NODUMP);
	if (ioctl_flags & FS_NODUMP_FL)
		xoap->xoa_nodump = B_TRUE;

	XVA_SET_REQ(xva, XAT_PROJINHERIT);
	if (ioctl_flags & ZFS_PROJINHERIT_FL)
		xoap->xoa_projinherit = B_TRUE;

	return (0);
}

static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint32_t flags;
	cred_t *cr = CRED();
	xvattr_t xva;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&flags, arg, sizeof (flags)))
		return (-EFAULT);

	err = __zpl_ioctl_setflags(ip, flags, &xva);
	if (err)
		return (err);

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}

static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
	zfsxattr_t fsx = { 0 };
	struct inode *ip = file_inode(filp);
	int err;

	fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
	fsx.fsx_projid = ITOZ(ip)->z_projid;
	err = copy_to_user(arg, &fsx, sizeof (fsx));

	return (err);
}

static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	zfsxattr_t fsx;
	cred_t *cr = CRED();
	xvattr_t xva;
	xoptattr_t *xoap;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&fsx, arg, sizeof (fsx)))
		return (-EFAULT);

	if (!zpl_is_valid_projid(fsx.fsx_projid))
		return (-EINVAL);

	err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
	if (err)
		return (err);

	xoap = xva_getxoptattr(&xva);
	XVA_SET_REQ(&xva, XAT_PROJID);
	xoap->xoa_projid = fsx.fsx_projid;

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}

static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return (zpl_ioctl_getflags(filp, (void *)arg));
	case FS_IOC_SETFLAGS:
		return (zpl_ioctl_setflags(filp, (void *)arg));
	case ZFS_IOC_FSGETXATTR:
		return (zpl_ioctl_getxattr(filp, (void *)arg));
	case ZFS_IOC_FSSETXATTR:
		return (zpl_ioctl_setxattr(filp, (void *)arg));
	default:
		return (-ENOTTY);
	}
}
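
/*
 * For illustration only: FS_IOC_GETFLAGS/FS_IOC_SETFLAGS are the same
 * ioctls used by lsattr(1) and chattr(1). A minimal userspace sketch,
 * assuming an open descriptor 'fd' and <linux/fs.h> for the constants:
 *
 *	unsigned int flags;
 *	ioctl(fd, FS_IOC_GETFLAGS, &flags);	// read current attributes
 *	flags |= FS_NODUMP_FL;			// e.g. set the nodump bit
 *	ioctl(fd, FS_IOC_SETFLAGS, &flags);	// write them back
 */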

#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	default:
		return (-ENOTTY);
	}
	return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */


const struct address_space_operations zpl_address_space_operations = {
	.readpages	= zpl_readpages,
	.readpage	= zpl_readpage,
	.writepage	= zpl_writepage,
	.writepages	= zpl_writepages,
	.direct_IO	= zpl_direct_IO,
};

const struct file_operations zpl_file_operations = {
	.open		= zpl_open,
	.release	= zpl_release,
	.llseek		= zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
	.read		= new_sync_read,
	.write		= new_sync_write,
#endif
	.read_iter	= zpl_iter_read,
	.write_iter	= zpl_iter_write,
#else
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= zpl_aio_read,
	.aio_write	= zpl_aio_write,
#endif
	.mmap		= zpl_mmap,
	.fsync		= zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
	.aio_fsync	= zpl_aio_fsync,
#endif
#ifdef HAVE_FILE_FALLOCATE
	.fallocate	= zpl_fallocate,
#endif /* HAVE_FILE_FALLOCATE */
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};

const struct file_operations zpl_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
	.iterate_shared	= zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
	.iterate	= zpl_iterate,
#else
	.readdir	= zpl_readdir,
#endif
	.fsync		= zpl_fsync,
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};