/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 */

#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif
#include <sys/file.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_project.h>

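/*
 * Open a file.  Basic validation of the open flags is delegated to
 * generic_file_open(), after which the ZFS layer is notified via
 * zfs_open() using the caller's credentials.
 */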
static int
zpl_open(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = generic_file_open(ip, filp);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

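/*
 * Release a file.  Any dirty atime state is flushed to the inode
 * before the ZFS layer is notified via zfs_close().
 */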
static int
zpl_release(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	if (ITOZ(ip)->z_atime_dirty)
		zfs_mark_inode_dirty(ip);

	crhold(cr);
	error = -zfs_close(ip, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

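/*
 * Iterate over the entries of a directory.  The actual work is done
 * by zfs_readdir() which fills in the passed zpl_dir_context_t.
 */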
static int
zpl_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_readdir(file_inode(filp), ctx, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#if !defined(HAVE_VFS_ITERATE) && !defined(HAVE_VFS_ITERATE_SHARED)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	zpl_dir_context_t ctx =
	    ZPL_DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
	int error;

	error = zpl_iterate(filp, &ctx);
	filp->f_pos = ctx.pos;

	return (error);
}
#endif /* !HAVE_VFS_ITERATE && !HAVE_VFS_ITERATE_SHARED */

#if defined(HAVE_FSYNC_WITH_DENTRY)
/*
 * Linux 2.6.x - 2.6.34 API,
 * Through 2.6.34 the nfsd kernel server would pass a NULL 'file struct *'
 * to the fops->fsync() hook.  For this reason, we must be careful not to
 * use filp unconditionally.
 */
static int
zpl_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(dentry->d_inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	struct file *filp = kiocb->ki_filp;
	return (zpl_fsync(filp, file_dentry(filp), datasync));
}
#endif

#elif defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
 * Linux 2.6.35 - 3.0 API,
 * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
 * redundant.  The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 */
static int
zpl_fsync(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, datasync));
}
#endif

#elif defined(HAVE_FSYNC_RANGE)
/*
 * Linux 3.1 - 3.x API,
 * As of 3.1 the responsibility to call filemap_write_and_wait_range() has
 * been pushed down into the .fsync() vfs hook.  Additionally, the i_mutex
 * lock is no longer held by the caller.  ZFS does not require the lock to
 * be held, so we do not acquire it.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

#ifdef HAVE_FILE_AIO_FSYNC
static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#endif

#else
#error "Unsupported fops->fsync() implementation"
#endif

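/*
 * Map the IOCB_* flags set on a kiocb to the matching FDSYNC, FSYNC,
 * FAPPEND, and FDIRECT flags understood by zfs_read() and zfs_write().
 */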
static inline int
zfs_io_flags(struct kiocb *kiocb)
{
	int flags = 0;

#if defined(IOCB_DSYNC)
	if (kiocb->ki_flags & IOCB_DSYNC)
		flags |= FDSYNC;
#endif
#if defined(IOCB_SYNC)
	if (kiocb->ki_flags & IOCB_SYNC)
		flags |= FSYNC;
#endif
#if defined(IOCB_APPEND)
	if (kiocb->ki_flags & IOCB_APPEND)
		flags |= FAPPEND;
#endif
#if defined(IOCB_DIRECT)
	if (kiocb->ki_flags & IOCB_DIRECT)
		flags |= FDIRECT;
#endif
	return (flags);
}

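/*
 * Common read path.  The caller's iovec is wrapped in a uio_t and
 * handed to zfs_read(); on success *ppos is advanced by the number
 * of bytes actually read.
 */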
static ssize_t
zpl_read_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
    cred_t *cr, size_t skip)
{
	ssize_t read;
	uio_t uio;
	int error;
	fstrans_cookie_t cookie;

	uio.uio_iov = iovp;
	uio.uio_skip = skip;
	uio.uio_resid = count;
	uio.uio_iovcnt = nr_segs;
	uio.uio_loffset = *ppos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	cookie = spl_fstrans_mark();
	error = -zfs_read(ip, &uio, flags, cr);
	spl_fstrans_unmark(cookie);
	if (error < 0)
		return (error);

	read = count - uio.uio_resid;
	*ppos += read;

	return (read);
}

inline ssize_t
zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	struct iovec iov;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	return (zpl_read_common_iovec(ip, &iov, len, 1, ppos, segment,
	    flags, cr, 0));
}

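/*
 * Shared back end for the .read_iter() and .aio_read() hooks.  It
 * takes the caller's credentials, merges the kiocb I/O flags into
 * the file flags, and updates the access time on completion.
 */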
static ssize_t
zpl_iter_read_common(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	ssize_t read;
	unsigned int f_flags = filp->f_flags;

	f_flags |= zfs_io_flags(kiocb);
	crhold(cr);
	read = zpl_read_common_iovec(filp->f_mapping->host, iovp, count,
	    nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip);
	crfree(cr);

	file_accessed(filp);
	return (read);
}

#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
	ssize_t ret;
	uio_seg_t seg = UIO_USERSPACE;
	if (to->type & ITER_KVEC)
		seg = UIO_SYSSPACE;
	if (to->type & ITER_BVEC)
		seg = UIO_BVEC;
	ret = zpl_iter_read_common(kiocb, to->iov, to->nr_segs,
	    iov_iter_count(to), seg, to->iov_offset);
	if (ret > 0)
		iov_iter_advance(to, ret);
	return (ret);
}
#else
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	ssize_t ret;
	size_t count;

	ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_WRITE);
	if (ret)
		return (ret);

	return (zpl_iter_read_common(kiocb, iovp, nr_segs, count,
	    UIO_USERSPACE, 0));
}
#endif /* HAVE_VFS_RW_ITERATE */

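/*
 * Common write path.  This mirrors zpl_read_common_iovec() with the
 * addition that O_APPEND writes are positioned at the current end of
 * file before the uio is constructed.
 */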
static ssize_t
zpl_write_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment, int flags,
    cred_t *cr, size_t skip)
{
	ssize_t wrote;
	uio_t uio;
	int error;
	fstrans_cookie_t cookie;

	if (flags & O_APPEND)
		*ppos = i_size_read(ip);

	uio.uio_iov = iovp;
	uio.uio_skip = skip;
	uio.uio_resid = count;
	uio.uio_iovcnt = nr_segs;
	uio.uio_loffset = *ppos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	cookie = spl_fstrans_mark();
	error = -zfs_write(ip, &uio, flags, cr);
	spl_fstrans_unmark(cookie);
	if (error < 0)
		return (error);

	wrote = count - uio.uio_resid;
	*ppos += wrote;

	return (wrote);
}

inline ssize_t
zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	struct iovec iov;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	return (zpl_write_common_iovec(ip, &iov, len, 1, ppos, segment,
	    flags, cr, 0));
}

static ssize_t
zpl_iter_write_common(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, size_t count, uio_seg_t seg, size_t skip)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	ssize_t wrote;
	unsigned int f_flags = filp->f_flags;

	f_flags |= zfs_io_flags(kiocb);
	crhold(cr);
	wrote = zpl_write_common_iovec(filp->f_mapping->host, iovp, count,
	    nr_segs, &kiocb->ki_pos, seg, f_flags, cr, skip);
	crfree(cr);

	return (wrote);
}

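/*
 * Write entry points.  Depending on the kernel version,
 * generic_write_checks() takes either the kiocb and iov_iter or the
 * file, position, and count; both variants validate the request and
 * clamp the byte count before the common write path runs.
 */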
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
	size_t count;
	ssize_t ret;
	uio_seg_t seg = UIO_USERSPACE;

#ifndef HAVE_GENERIC_WRITE_CHECKS_KIOCB
	struct file *file = kiocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *ip = mapping->host;
	int isblk = S_ISBLK(ip->i_mode);

	count = iov_iter_count(from);
	ret = generic_write_checks(file, &kiocb->ki_pos, &count, isblk);
	if (ret)
		return (ret);
#else
	/*
	 * XXX - ideally this check should be in the same lock region with
	 * write operations, so that there's no TOCTTOU race when doing an
	 * append and someone else grows the file.
	 */
	ret = generic_write_checks(kiocb, from);
	if (ret <= 0)
		return (ret);
	count = ret;
#endif

	if (from->type & ITER_KVEC)
		seg = UIO_SYSSPACE;
	if (from->type & ITER_BVEC)
		seg = UIO_BVEC;

	ret = zpl_iter_write_common(kiocb, from->iov, from->nr_segs,
	    count, seg, from->iov_offset);
	if (ret > 0)
		iov_iter_advance(from, ret);

	return (ret);
}
#else
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	struct file *file = kiocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *ip = mapping->host;
	int isblk = S_ISBLK(ip->i_mode);
	size_t count;
	ssize_t ret;

	ret = generic_segment_checks(iovp, &nr_segs, &count, VERIFY_READ);
	if (ret)
		return (ret);

	ret = generic_write_checks(file, &pos, &count, isblk);
	if (ret)
		return (ret);

	return (zpl_iter_write_common(kiocb, iovp, nr_segs, count,
	    UIO_USERSPACE, 0));
}
#endif /* HAVE_VFS_RW_ITERATE */

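/*
 * Direct I/O requests are serviced by the regular zpl_iter_read(),
 * zpl_iter_write(), or AIO paths above; ZFS caches file data in the
 * ARC, so no page cache bypass is performed here.  The wrappers
 * below adapt the various historical .direct_IO() prototypes.
 */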
#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_direct_IO_impl(int rw, struct kiocb *kiocb, struct iov_iter *iter)
{
	if (rw == WRITE)
		return (zpl_iter_write(kiocb, iter));
	else
		return (zpl_iter_read(kiocb, iter));
}
#if defined(HAVE_VFS_DIRECT_IO_ITER)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter)
{
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_OFFSET)
static ssize_t
zpl_direct_IO(struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(iov_iter_rw(iter), kiocb, iter));
}
#elif defined(HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, struct iov_iter *iter, loff_t pos)
{
	ASSERT3S(pos, ==, kiocb->ki_pos);
	return (zpl_direct_IO_impl(rw, kiocb, iter));
}
#else
#error "Unknown direct IO interface"
#endif

#else

#if defined(HAVE_VFS_DIRECT_IO_IOVEC)
static ssize_t
zpl_direct_IO(int rw, struct kiocb *kiocb, const struct iovec *iovp,
    loff_t pos, unsigned long nr_segs)
{
	if (rw == WRITE)
		return (zpl_aio_write(kiocb, iovp, nr_segs, pos));
	else
		return (zpl_aio_read(kiocb, iovp, nr_segs, pos));
}
#else
#error "Unknown direct IO interface"
#endif

#endif /* HAVE_VFS_RW_ITERATE */

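/*
 * Seek within a file.  SEEK_HOLE and SEEK_DATA are resolved by
 * zfs_holey() while holding the inode lock shared; all other whence
 * values fall through to generic_file_llseek().
 */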
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	fstrans_cookie_t cookie;

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *ip = filp->f_mapping->host;
		loff_t maxbytes = ip->i_sb->s_maxbytes;
		loff_t error;

		spl_inode_lock_shared(ip);
		cookie = spl_fstrans_mark();
		error = -zfs_holey(ip, whence, &offset);
		spl_fstrans_unmark(cookie);
		if (error == 0)
			error = lseek_execute(filp, ip, offset, maxbytes);
		spl_inode_unlock_shared(ip);

		return (error);
	}
#endif /* SEEK_HOLE && SEEK_DATA */

	return (generic_file_llseek(filp, offset, whence));
}

/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC.  This has been shown to work
 * well for the common read(2)/write(2) case.  However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache.  To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache.  The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated.  For a read(2) data will be read first from the page
 * cache then the ARC if needed.  Neither a write(2) nor a read(2)
 * will ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region.  These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage().  This will occur due to either a sync or the usual
 * page aging behavior.  Note that because a read(2) of an mmap'ed
 * file always checks the page cache first, correct data will be
 * returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks.  The most obvious of which is that it increases
 * the required memory footprint when accessing mmap'ed files.  It
 * also adds additional complexity to the code, which must keep both
 * caches synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly on to the ARC buffers.  The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index.  The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both.  It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	znode_t *zp = ITOZ(ip);
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	spl_fstrans_unmark(cookie);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = B_TRUE;
	mutex_exit(&zp->z_lock);

	return (error);
}

/*
 * Populate a page with data for the Linux page cache.  This function is
 * only used to support mmap(2).  There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 *
 * Currently this function relies on zpl_read_common() and the O_DIRECT
 * flag to read in a page.  This works but the more correct way is to
 * update zfs_fillpage() to be Linux friendly and use that interface.
 */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	cookie = spl_fstrans_mark();
	error = -zfs_getpage(ip, pl, 1);
	spl_fstrans_unmark(cookie);

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}

/*
 * Populate a set of pages with data for the Linux page cache.  This
 * function will only be called for read ahead and never for demand
 * paging.  For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
	return (read_cache_pages(mapping, pages,
	    (filler_t *)zpl_readpage, filp));
}

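/*
 * Write a single dirty page back via zfs_putpage().  This is used
 * both as the per-page callback for write_cache_pages() and by
 * zpl_writepage() below.
 */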
int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));

	cookie = spl_fstrans_mark();
	(void) zfs_putpage(mapping->host, pp, wbc);
	spl_fstrans_unmark(cookie);

	return (0);
}

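/*
 * Write back the dirty pages of an inode.  For datasets mounted with
 * sync=always the writeback mode is upgraded to WB_SYNC_ALL; see the
 * comments below for how the resulting ZIL commit is batched.
 */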
static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfsvfs_t *zfsvfs = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZFS_ENTER(zfsvfs);
	if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZFS_EXIT(zfsvfs);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance.  Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	if (sync_mode != wbc->sync_mode) {
		ZFS_ENTER(zfsvfs);
		ZFS_VERIFY_ZP(zp);
		if (zfsvfs->z_log != NULL)
			zil_commit(zfsvfs->z_log, zp->z_id);
		ZFS_EXIT(zfsvfs);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details).  That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	}
	return (result);
}

/*
 * Write out dirty pages to the ARC.  This function is only required to
 * support mmap(2).  Mapped pages may be dirtied by memory operations
 * which never call .write().  These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;

	return (zpl_putpage(pp, wbc, pp->mapping));
}

/*
 * The only flag combination which matches the behavior of zfs_space()
 * is FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE.  The FALLOC_FL_PUNCH_HOLE
 * flag was introduced in the 2.6.38 kernel.
 */
#if defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE)
long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
	int error = -EOPNOTSUPP;

#if defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
	cred_t *cr = CRED();
	flock64_t bf;
	loff_t olen;
	fstrans_cookie_t cookie;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return (error);

	if (offset < 0 || len <= 0)
		return (-EINVAL);

	spl_inode_lock(ip);
	olen = i_size_read(ip);

	if (offset > olen) {
		spl_inode_unlock(ip);
		return (0);
	}
	if (offset + len > olen)
		len = olen - offset;
	bf.l_type = F_WRLCK;
	bf.l_whence = SEEK_SET;
	bf.l_start = offset;
	bf.l_len = len;
	bf.l_pid = 0;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
	spl_fstrans_unmark(cookie);
	spl_inode_unlock(ip);

	crfree(cr);
#endif /* defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE) */

	ASSERT3S(error, <=, 0);
	return (error);
}
#endif /* defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE) */

#ifdef HAVE_FILE_FALLOCATE
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	return zpl_fallocate_common(file_inode(filp),
	    mode, offset, len);
}
#endif /* HAVE_FILE_FALLOCATE */

#define	ZFS_FL_USER_VISIBLE	(FS_FL_USER_VISIBLE | ZFS_PROJINHERIT_FL)
#define	ZFS_FL_USER_MODIFIABLE	(FS_FL_USER_MODIFIABLE | ZFS_PROJINHERIT_FL)

static uint32_t
__zpl_ioctl_getflags(struct inode *ip)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	uint32_t ioctl_flags = 0;

	if (zfs_flags & ZFS_IMMUTABLE)
		ioctl_flags |= FS_IMMUTABLE_FL;

	if (zfs_flags & ZFS_APPENDONLY)
		ioctl_flags |= FS_APPEND_FL;

	if (zfs_flags & ZFS_NODUMP)
		ioctl_flags |= FS_NODUMP_FL;

	if (zfs_flags & ZFS_PROJINHERIT)
		ioctl_flags |= ZFS_PROJINHERIT_FL;

	return (ioctl_flags & ZFS_FL_USER_VISIBLE);
}

/*
 * Map zfs file z_pflags (xvattr_t) to Linux file attributes.  Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
	uint32_t flags;
	int err;

	flags = __zpl_ioctl_getflags(file_inode(filp));
	err = copy_to_user(arg, &flags, sizeof (flags));

	return (err);
}

/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag.  This is ugly, but the requirement that we do this is a consequence
 * of how the Linux file attribute interface was designed.  Another
 * consequence is that concurrent modification of files suffers from a
 * TOCTOU race.  Neither are things we can fix without modifying the
 * kernel-userland interface, which is outside of our jurisdiction.
 */

#define	fchange(f0, f1, b0, b1) (!((f0) & (b0)) != !((f1) & (b1)))

static int
__zpl_ioctl_setflags(struct inode *ip, uint32_t ioctl_flags, xvattr_t *xva)
{
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	xoptattr_t *xoap;

	if (ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL |
	    ZFS_PROJINHERIT_FL))
		return (-EOPNOTSUPP);

	if (ioctl_flags & ~ZFS_FL_USER_MODIFIABLE)
		return (-EACCES);

	if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EACCES);

	if (!zpl_inode_owner_or_capable(ip))
		return (-EACCES);

	xva_init(xva);
	xoap = xva_getxoptattr(xva);

	XVA_SET_REQ(xva, XAT_IMMUTABLE);
	if (ioctl_flags & FS_IMMUTABLE_FL)
		xoap->xoa_immutable = B_TRUE;

	XVA_SET_REQ(xva, XAT_APPENDONLY);
	if (ioctl_flags & FS_APPEND_FL)
		xoap->xoa_appendonly = B_TRUE;

	XVA_SET_REQ(xva, XAT_NODUMP);
	if (ioctl_flags & FS_NODUMP_FL)
		xoap->xoa_nodump = B_TRUE;

	XVA_SET_REQ(xva, XAT_PROJINHERIT);
	if (ioctl_flags & ZFS_PROJINHERIT_FL)
		xoap->xoa_projinherit = B_TRUE;

	return (0);
}

static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint32_t flags;
	cred_t *cr = CRED();
	xvattr_t xva;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&flags, arg, sizeof (flags)))
		return (-EFAULT);

	err = __zpl_ioctl_setflags(ip, flags, &xva);
	if (err)
		return (err);

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}

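/*
 * The ZFS_IOC_FSGETXATTR and ZFS_IOC_FSSETXATTR ioctls extend the
 * FS_IOC_GETFLAGS/SETFLAGS interface with the project ID used for
 * project quota accounting.
 */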
static int
zpl_ioctl_getxattr(struct file *filp, void __user *arg)
{
	zfsxattr_t fsx = { 0 };
	struct inode *ip = file_inode(filp);
	int err;

	fsx.fsx_xflags = __zpl_ioctl_getflags(ip);
	fsx.fsx_projid = ITOZ(ip)->z_projid;
	err = copy_to_user(arg, &fsx, sizeof (fsx));

	return (err);
}

static int
zpl_ioctl_setxattr(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	zfsxattr_t fsx;
	cred_t *cr = CRED();
	xvattr_t xva;
	xoptattr_t *xoap;
	int err;
	fstrans_cookie_t cookie;

	if (copy_from_user(&fsx, arg, sizeof (fsx)))
		return (-EFAULT);

	if (!zpl_is_valid_projid(fsx.fsx_projid))
		return (-EINVAL);

	err = __zpl_ioctl_setflags(ip, fsx.fsx_xflags, &xva);
	if (err)
		return (err);

	xoap = xva_getxoptattr(&xva);
	XVA_SET_REQ(&xva, XAT_PROJID);
	xoap->xoa_projid = fsx.fsx_projid;

	crhold(cr);
	cookie = spl_fstrans_mark();
	err = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (err);
}

static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return (zpl_ioctl_getflags(filp, (void *)arg));
	case FS_IOC_SETFLAGS:
		return (zpl_ioctl_setflags(filp, (void *)arg));
	case ZFS_IOC_FSGETXATTR:
		return (zpl_ioctl_getxattr(filp, (void *)arg));
	case ZFS_IOC_FSSETXATTR:
		return (zpl_ioctl_setxattr(filp, (void *)arg));
	default:
		return (-ENOTTY);
	}
}

#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC32_GETFLAGS:
		cmd = FS_IOC_GETFLAGS;
		break;
	case FS_IOC32_SETFLAGS:
		cmd = FS_IOC_SETFLAGS;
		break;
	default:
		return (-ENOTTY);
	}
	return (zpl_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)));
}
#endif /* CONFIG_COMPAT */

const struct address_space_operations zpl_address_space_operations = {
	.readpages	= zpl_readpages,
	.readpage	= zpl_readpage,
	.writepage	= zpl_writepage,
	.writepages	= zpl_writepages,
	.direct_IO	= zpl_direct_IO,
};

const struct file_operations zpl_file_operations = {
	.open		= zpl_open,
	.release	= zpl_release,
	.llseek		= zpl_llseek,
#ifdef HAVE_VFS_RW_ITERATE
#ifdef HAVE_NEW_SYNC_READ
	.read		= new_sync_read,
	.write		= new_sync_write,
#endif
	.read_iter	= zpl_iter_read,
	.write_iter	= zpl_iter_write,
#else
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= zpl_aio_read,
	.aio_write	= zpl_aio_write,
#endif
	.mmap		= zpl_mmap,
	.fsync		= zpl_fsync,
#ifdef HAVE_FILE_AIO_FSYNC
	.aio_fsync	= zpl_aio_fsync,
#endif
#ifdef HAVE_FILE_FALLOCATE
	.fallocate	= zpl_fallocate,
#endif /* HAVE_FILE_FALLOCATE */
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};

const struct file_operations zpl_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
#if defined(HAVE_VFS_ITERATE_SHARED)
	.iterate_shared	= zpl_iterate,
#elif defined(HAVE_VFS_ITERATE)
	.iterate	= zpl_iterate,
#else
	.readdir	= zpl_readdir,
#endif
	.fsync		= zpl_fsync,
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};