/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2011, Lawrence Livermore National Security, LLC.
 */

#include <sys/dmu_objset.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_znode.h>
#include <sys/zpl.h>


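/*
 * All of the VFS entry points below share a common pattern: take a
 * reference on the current credentials with crhold(), mark the task with
 * spl_fstrans_mark() so that allocations made while inside ZFS cannot
 * recursively re-enter the filesystem during memory reclaim, call the
 * corresponding zfs_*() function, and negate its positive Solaris-style
 * errno for Linux. The ASSERT3S() checks verify that convention.
 */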
static int
zpl_open(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = generic_file_open(ip, filp);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_open(ip, filp->f_mode, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_release(struct inode *ip, struct file *filp)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	if (ITOZ(ip)->z_atime_dirty)
		zfs_mark_inode_dirty(ip);

	crhold(cr);
	error = -zfs_close(ip, filp->f_flags, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_iterate(struct file *filp, struct dir_context *ctx)
{
	struct dentry *dentry = filp->f_path.dentry;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_readdir(dentry->d_inode, ctx, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

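/*
 * Kernels that predate the 3.11 ->iterate() directory interface expose
 * ->readdir() instead. In that case the ZFS compatibility headers are
 * expected to supply a struct dir_context emulation (DIR_CONTEXT_INIT)
 * so the shim below can reuse zpl_iterate() unchanged.
 */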
#if !defined(HAVE_VFS_ITERATE)
static int
zpl_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dir_context ctx = DIR_CONTEXT_INIT(dirent, filldir, filp->f_pos);
	int error;

	error = zpl_iterate(filp, &ctx);
	filp->f_pos = ctx.pos;

	return (error);
}
#endif /* HAVE_VFS_ITERATE */

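/*
 * In all three fsync variants below a non-zero 'datasync' corresponds to
 * fdatasync(2), which permits skipping metadata not needed to retrieve
 * the file data; zero corresponds to fsync(2). Either way zfs_fsync()
 * forces the relevant log records out via the ZIL.
 */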
#if defined(HAVE_FSYNC_WITH_DENTRY)
/*
 * Linux 2.6.x - 2.6.34 API,
 * Through 2.6.34 the nfsd kernel server would pass a NULL 'struct file *'
 * to the fops->fsync() hook. For this reason, we must be careful not to
 * use filp unconditionally.
 */
static int
zpl_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(dentry->d_inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	struct file *filp = kiocb->ki_filp;

	return (zpl_fsync(filp, filp->f_path.dentry, datasync));
}
#elif defined(HAVE_FSYNC_WITHOUT_DENTRY)
/*
 * Linux 2.6.35 - 3.0 API,
 * As of 2.6.35 the dentry argument to the fops->fsync() hook was deemed
 * redundant. The dentry is still accessible via filp->f_path.dentry,
 * and we are guaranteed that filp will never be NULL.
 */
static int
zpl_fsync(struct file *filp, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, datasync));
}
#elif defined(HAVE_FSYNC_RANGE)
/*
 * Linux 3.1 - 3.x API,
 * As of 3.1 the responsibility to call filemap_write_and_wait_range() has
 * been pushed down into the .fsync() vfs hook. Additionally, the i_mutex
 * lock is no longer held by the caller; zfs does not require the lock, so
 * we do not acquire it ourselves.
 */
static int
zpl_fsync(struct file *filp, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = filp->f_mapping->host;
	cred_t *cr = CRED();
	int error;
	fstrans_cookie_t cookie;

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return (error);

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_fsync(inode, datasync, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);
	ASSERT3S(error, <=, 0);

	return (error);
}

static int
zpl_aio_fsync(struct kiocb *kiocb, int datasync)
{
	return (zpl_fsync(kiocb->ki_filp, kiocb->ki_pos, -1, datasync));
}
#else
#error "Unsupported fops->fsync() implementation"
#endif

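/*
 * The common read path translates the Linux iovec array into a
 * Solaris-style uio_t. zfs_read() decrements uio_resid as it copies data
 * out, so the number of bytes actually transferred is the original count
 * minus the residual; that delta advances the file position and feeds
 * task_io_account_read() for per-task I/O accounting.
 */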
static inline ssize_t
zpl_read_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment,
    int flags, cred_t *cr)
{
	ssize_t read;
	uio_t uio;
	int error;
	fstrans_cookie_t cookie;

	uio.uio_iov = (struct iovec *)iovp;
	uio.uio_resid = count;
	uio.uio_iovcnt = nr_segs;
	uio.uio_loffset = *ppos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	cookie = spl_fstrans_mark();
	error = -zfs_read(ip, &uio, flags, cr);
	spl_fstrans_unmark(cookie);
	if (error < 0)
		return (error);

	read = count - uio.uio_resid;
	*ppos += read;
	task_io_account_read(read);

	return (read);
}

inline ssize_t
zpl_read_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	struct iovec iov;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	return (zpl_read_common_iovec(ip, &iov, len, 1, ppos, segment,
	    flags, cr));
}

static ssize_t
zpl_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
	cred_t *cr = CRED();
	ssize_t read;

	crhold(cr);
	read = zpl_read_common(filp->f_mapping->host, buf, len, ppos,
	    UIO_USERSPACE, filp->f_flags, cr);
	crfree(cr);

	return (read);
}

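/*
 * The iovec array handed in by the aio/iter entry points is const, but
 * zfs_read()/zfs_write() advance the iovec array in place as the uio is
 * consumed. The helpers below therefore operate on a scratch copy of the
 * array rather than on the caller's original.
 */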
static ssize_t
zpl_iter_read_common(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, size_t count)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	ssize_t read;
	size_t alloc_size = sizeof (struct iovec) * nr_segs;
	struct iovec *iov_tmp;

	ASSERT(iovp);

	iov_tmp = kmem_alloc(alloc_size, KM_SLEEP);
	bcopy(iovp, iov_tmp, alloc_size);

	crhold(cr);
	read = zpl_read_common_iovec(filp->f_mapping->host, iov_tmp, count,
	    nr_segs, &kiocb->ki_pos, UIO_USERSPACE, filp->f_flags, cr);
	crfree(cr);

	kmem_free(iov_tmp, alloc_size);

	return (read);
}

#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_iter_read(struct kiocb *kiocb, struct iov_iter *to)
{
	return (zpl_iter_read_common(kiocb, to->iov, to->nr_segs,
	    iov_iter_count(to)));
}
#else
static ssize_t
zpl_aio_read(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	return (zpl_iter_read_common(kiocb, iovp, nr_segs, kiocb->ki_nbytes));
}
#endif /* HAVE_VFS_RW_ITERATE */

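/*
 * The write path mirrors the read path above. Two details differ: an
 * O_APPEND write repositions the offset to the current end of file before
 * building the uio, and completed bytes are charged to the task via
 * task_io_account_write(), the counter reported by tools such as iotop(1).
 */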
static inline ssize_t
zpl_write_common_iovec(struct inode *ip, const struct iovec *iovp, size_t count,
    unsigned long nr_segs, loff_t *ppos, uio_seg_t segment,
    int flags, cred_t *cr)
{
	ssize_t wrote;
	uio_t uio;
	int error;
	fstrans_cookie_t cookie;

	if (flags & O_APPEND)
		*ppos = i_size_read(ip);

	uio.uio_iov = (struct iovec *)iovp;
	uio.uio_resid = count;
	uio.uio_iovcnt = nr_segs;
	uio.uio_loffset = *ppos;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = segment;

	cookie = spl_fstrans_mark();
	error = -zfs_write(ip, &uio, flags, cr);
	spl_fstrans_unmark(cookie);
	if (error < 0)
		return (error);

	wrote = count - uio.uio_resid;
	*ppos += wrote;
	task_io_account_write(wrote);

	return (wrote);
}

inline ssize_t
zpl_write_common(struct inode *ip, const char *buf, size_t len, loff_t *ppos,
    uio_seg_t segment, int flags, cred_t *cr)
{
	struct iovec iov;

	iov.iov_base = (void *)buf;
	iov.iov_len = len;

	return (zpl_write_common_iovec(ip, &iov, len, 1, ppos, segment,
	    flags, cr));
}

static ssize_t
zpl_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos)
{
	cred_t *cr = CRED();
	ssize_t wrote;

	crhold(cr);
	wrote = zpl_write_common(filp->f_mapping->host, buf, len, ppos,
	    UIO_USERSPACE, filp->f_flags, cr);
	crfree(cr);

	return (wrote);
}

static ssize_t
zpl_iter_write_common(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, size_t count)
{
	cred_t *cr = CRED();
	struct file *filp = kiocb->ki_filp;
	ssize_t wrote;
	size_t alloc_size = sizeof (struct iovec) * nr_segs;
	struct iovec *iov_tmp;

	ASSERT(iovp);

	iov_tmp = kmem_alloc(alloc_size, KM_SLEEP);
	bcopy(iovp, iov_tmp, alloc_size);

	crhold(cr);
	wrote = zpl_write_common_iovec(filp->f_mapping->host, iov_tmp, count,
	    nr_segs, &kiocb->ki_pos, UIO_USERSPACE, filp->f_flags, cr);
	crfree(cr);

	kmem_free(iov_tmp, alloc_size);

	return (wrote);
}

#if defined(HAVE_VFS_RW_ITERATE)
static ssize_t
zpl_iter_write(struct kiocb *kiocb, struct iov_iter *from)
{
	return (zpl_iter_write_common(kiocb, from->iov, from->nr_segs,
	    iov_iter_count(from)));
}
#else
static ssize_t
zpl_aio_write(struct kiocb *kiocb, const struct iovec *iovp,
    unsigned long nr_segs, loff_t pos)
{
	return (zpl_iter_write_common(kiocb, iovp, nr_segs, kiocb->ki_nbytes));
}
#endif /* HAVE_VFS_RW_ITERATE */

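/*
 * On kernels that define SEEK_HOLE and SEEK_DATA, lseek(2) can probe the
 * layout of sparse files: SEEK_DATA finds the next region containing data
 * at or after 'offset', and SEEK_HOLE the next hole. zfs_holey() resolves
 * both queries against the file's block layout; everything else is
 * deferred to generic_file_llseek().
 */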
static loff_t
zpl_llseek(struct file *filp, loff_t offset, int whence)
{
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
	fstrans_cookie_t cookie;

	if (whence == SEEK_DATA || whence == SEEK_HOLE) {
		struct inode *ip = filp->f_mapping->host;
		loff_t maxbytes = ip->i_sb->s_maxbytes;
		loff_t error;

		spl_inode_lock(ip);
		cookie = spl_fstrans_mark();
		error = -zfs_holey(ip, whence, &offset);
		spl_fstrans_unmark(cookie);
		if (error == 0)
			error = lseek_execute(filp, ip, offset, maxbytes);
		spl_inode_unlock(ip);

		return (error);
	}
#endif /* SEEK_HOLE && SEEK_DATA */

	return (generic_file_llseek(filp, offset, whence));
}

/*
 * It's worth taking a moment to describe how mmap is implemented
 * for zfs because it differs considerably from other Linux filesystems.
 * However, this issue is handled the same way under OpenSolaris.
 *
 * The issue is that by design zfs bypasses the Linux page cache and
 * leaves all caching up to the ARC. This has been shown to work
 * well for the common read(2)/write(2) case. However, mmap(2)
 * is a problem because it relies on being tightly integrated with the
 * page cache. To handle this we cache mmap'ed files twice, once in
 * the ARC and a second time in the page cache. The code is careful
 * to keep both copies synchronized.
 *
 * When a file with an mmap'ed region is written to using write(2)
 * both the data in the ARC and existing pages in the page cache
 * are updated. For a read(2) data will be read first from the page
 * cache then the ARC if needed. Neither a write(2) nor a read(2) will
 * ever result in new pages being added to the page cache.
 *
 * New pages are added to the page cache only via .readpage() which
 * is called when the vfs needs to read a page off disk to back the
 * virtual memory region. These pages may be modified without
 * notifying the ARC and will be written out periodically via
 * .writepage(). This will occur due to either a sync or the usual
 * page aging behavior. Note that because a read(2) of a mmap'ed file
 * will always check the page cache first, correct data will still be
 * returned even when the ARC is out of date.
 *
 * While this implementation ensures correct behavior it does have
 * some drawbacks. The most obvious of which is that it increases
 * the required memory footprint when accessing mmap'ed files. It
 * also adds additional complexity to the code, keeping both caches
 * synchronized.
 *
 * Longer term it may be possible to cleanly resolve this wart by
 * mapping page cache pages directly onto the ARC buffers. The
 * Linux address space operations are flexible enough to allow
 * selection of which pages back a particular index. The trick
 * would be working out the details of which subsystem is in
 * charge, the ARC, the page cache, or both. It may also prove
 * helpful to move the ARC buffers to scatter-gather lists
 * rather than a vmalloc'ed region.
 */
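
/*
 * For example, under this scheme the following user-space sequence stays
 * coherent (a minimal sketch; the path is hypothetical):
 *
 *	int fd = open("/tank/fs/file", O_RDWR);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *
 *	pwrite(fd, "x", 1, 0);		// updates the ARC, and the page
 *					// cache copy if the page is resident
 *	assert(p[0] == 'x');		// a fresh fault reads via .readpage()
 *					// from the ARC; a resident page was
 *					// already updated by the write
 */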
static int
zpl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct inode *ip = filp->f_mapping->host;
	znode_t *zp = ITOZ(ip);
	int error;
	fstrans_cookie_t cookie;

	cookie = spl_fstrans_mark();
	error = -zfs_map(ip, vma->vm_pgoff, (caddr_t *)vma->vm_start,
	    (size_t)(vma->vm_end - vma->vm_start), vma->vm_flags);
	spl_fstrans_unmark(cookie);
	if (error)
		return (error);

	error = generic_file_mmap(filp, vma);
	if (error)
		return (error);

	mutex_enter(&zp->z_lock);
	zp->z_is_mapped = 1;
	mutex_exit(&zp->z_lock);

	return (error);
}

/*
 * Populate a page with data for the Linux page cache. This function is
 * only used to support mmap(2). There will be an identical copy of the
 * data in the ARC which is kept up to date via .write() and .writepage().
 *
 * Currently this function relies on zpl_read_common() and the O_DIRECT
 * flag to read in a page. This works but the more correct way is to
 * update zfs_fillpage() to be Linux friendly and use that interface.
 */
static int
zpl_readpage(struct file *filp, struct page *pp)
{
	struct inode *ip;
	struct page *pl[1];
	int error = 0;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ip = pp->mapping->host;
	pl[0] = pp;

	cookie = spl_fstrans_mark();
	error = -zfs_getpage(ip, pl, 1);
	spl_fstrans_unmark(cookie);

	if (error) {
		SetPageError(pp);
		ClearPageUptodate(pp);
	} else {
		ClearPageError(pp);
		SetPageUptodate(pp);
		flush_dcache_page(pp);
	}

	unlock_page(pp);
	return (error);
}

/*
 * Populate a set of pages with data for the Linux page cache. This
 * function will only be called for read ahead and never for demand
 * paging. For simplicity, the code relies on read_cache_pages() to
 * correctly lock each page for IO and call zpl_readpage().
 */
static int
zpl_readpages(struct file *filp, struct address_space *mapping,
    struct list_head *pages, unsigned nr_pages)
{
	return (read_cache_pages(mapping, pages,
	    (filler_t *)zpl_readpage, filp));
}

int
zpl_putpage(struct page *pp, struct writeback_control *wbc, void *data)
{
	struct address_space *mapping = data;
	fstrans_cookie_t cookie;

	ASSERT(PageLocked(pp));
	ASSERT(!PageWriteback(pp));

	cookie = spl_fstrans_mark();
	(void) zfs_putpage(mapping->host, pp, wbc);
	spl_fstrans_unmark(cookie);

	return (0);
}

static int
zpl_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	znode_t *zp = ITOZ(mapping->host);
	zfs_sb_t *zsb = ITOZSB(mapping->host);
	enum writeback_sync_modes sync_mode;
	int result;

	ZFS_ENTER(zsb);
	if (zsb->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;
	ZFS_EXIT(zsb);
	sync_mode = wbc->sync_mode;

	/*
	 * We don't want to run write_cache_pages() in SYNC mode here, because
	 * that would make putpage() wait for a single page to be committed to
	 * disk every single time, resulting in atrocious performance. Instead
	 * we run it once in non-SYNC mode so that the ZIL gets all the data,
	 * and then we commit it all in one go.
	 */
	wbc->sync_mode = WB_SYNC_NONE;
	result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	if (sync_mode != wbc->sync_mode) {
		ZFS_ENTER(zsb);
		ZFS_VERIFY_ZP(zp);
		if (zsb->z_log != NULL)
			zil_commit(zsb->z_log, zp->z_id);
		ZFS_EXIT(zsb);

		/*
		 * We need to call write_cache_pages() again (we can't just
		 * return after the commit) because the previous call in
		 * non-SYNC mode does not guarantee that we got all the dirty
		 * pages (see the implementation of write_cache_pages() for
		 * details). That being said, this is a no-op in most cases.
		 */
		wbc->sync_mode = sync_mode;
		result = write_cache_pages(mapping, wbc, zpl_putpage, mapping);
	}
	return (result);
}

/*
 * Write out dirty pages to the ARC. This function is only required to
 * support mmap(2). Mapped pages may be dirtied by memory operations
 * which never call .write(). These dirty pages are kept in sync with
 * the ARC buffers via this hook.
 */
static int
zpl_writepage(struct page *pp, struct writeback_control *wbc)
{
	if (ITOZSB(pp->mapping->host)->z_os->os_sync == ZFS_SYNC_ALWAYS)
		wbc->sync_mode = WB_SYNC_ALL;

	return (zpl_putpage(pp, wbc, pp->mapping));
}

/*
 * The only flag combination which matches the behavior of zfs_space()
 * is FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE. The FALLOC_FL_PUNCH_HOLE
 * flag was introduced in the 2.6.38 kernel.
 */
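
/*
 * From user space that combination corresponds to punching a hole while
 * leaving the file size unchanged, e.g. (a minimal sketch):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *	    offset, length);
 *
 * Any other mode falls through and fails with EOPNOTSUPP.
 */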
#if defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE)
long
zpl_fallocate_common(struct inode *ip, int mode, loff_t offset, loff_t len)
{
	int error = -EOPNOTSUPP;

#if defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE)
	cred_t *cr = CRED();
	flock64_t bf;
	loff_t olen;
	fstrans_cookie_t cookie;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return (error);

	if (offset < 0 || len <= 0)
		return (-EINVAL);

	crhold(cr);

	spl_inode_lock(ip);
	olen = i_size_read(ip);

	if (offset > olen) {
		spl_inode_unlock(ip);
		crfree(cr);
		return (0);
	}
	if (offset + len > olen)
		len = olen - offset;
	bf.l_type = F_WRLCK;
	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;
	bf.l_pid = 0;

	cookie = spl_fstrans_mark();
	error = -zfs_space(ip, F_FREESP, &bf, FWRITE, offset, cr);
	spl_fstrans_unmark(cookie);
	spl_inode_unlock(ip);

	crfree(cr);
#endif /* defined(FALLOC_FL_PUNCH_HOLE) && defined(FALLOC_FL_KEEP_SIZE) */

	ASSERT3S(error, <=, 0);
	return (error);
}
#endif /* defined(HAVE_FILE_FALLOCATE) || defined(HAVE_INODE_FALLOCATE) */

#ifdef HAVE_FILE_FALLOCATE
static long
zpl_fallocate(struct file *filp, int mode, loff_t offset, loff_t len)
{
	return (zpl_fallocate_common(filp->f_path.dentry->d_inode,
	    mode, offset, len));
}
#endif /* HAVE_FILE_FALLOCATE */

/*
 * Map zfs file z_pflags (xvattr_t) to linux file attributes. Only file
 * attributes common to both Linux and Solaris are mapped.
 */
static int
zpl_ioctl_getflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	unsigned int ioctl_flags = 0;
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;

	if (zfs_flags & ZFS_IMMUTABLE)
		ioctl_flags |= FS_IMMUTABLE_FL;

	if (zfs_flags & ZFS_APPENDONLY)
		ioctl_flags |= FS_APPEND_FL;

	if (zfs_flags & ZFS_NODUMP)
		ioctl_flags |= FS_NODUMP_FL;

	ioctl_flags &= FS_FL_USER_VISIBLE;

	if (copy_to_user(arg, &ioctl_flags, sizeof (ioctl_flags)))
		return (-EFAULT);

	return (0);
}

/*
 * fchange() is a helper macro to detect if we have been asked to change a
 * flag. This is ugly, but the requirement that we do this is a consequence of
 * how the Linux file attribute interface was designed. Another consequence is
 * that concurrent modification of files suffers from a TOCTOU race. Neither
 * are things we can fix without modifying the kernel-userland interface, which
 * is outside of our jurisdiction.
 */
#define fchange(f0, f1, b0, b1) ((((f0) & (b0)) == (b0)) != \
	(((b1) & (f1)) == (f1)))
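
/*
 * Worked example: fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL,
 * ZFS_IMMUTABLE) is intended to be true exactly when the requested state
 * of the immutable bit in 'ioctl_flags' differs from its current state in
 * 'zfs_flags', i.e. when the flag would actually change; only such
 * transitions require CAP_LINUX_IMMUTABLE below.
 */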

static int
zpl_ioctl_setflags(struct file *filp, void __user *arg)
{
	struct inode *ip = file_inode(filp);
	uint64_t zfs_flags = ITOZ(ip)->z_pflags;
	unsigned int ioctl_flags;
	cred_t *cr = CRED();
	xvattr_t xva;
	xoptattr_t *xoap;
	int error;
	fstrans_cookie_t cookie;

	if (copy_from_user(&ioctl_flags, arg, sizeof (ioctl_flags)))
		return (-EFAULT);

	if ((ioctl_flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL)))
		return (-EOPNOTSUPP);

	if ((ioctl_flags & ~(FS_FL_USER_MODIFIABLE)))
		return (-EACCES);

	if ((fchange(ioctl_flags, zfs_flags, FS_IMMUTABLE_FL, ZFS_IMMUTABLE) ||
	    fchange(ioctl_flags, zfs_flags, FS_APPEND_FL, ZFS_APPENDONLY)) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		return (-EACCES);

	if (!zpl_inode_owner_or_capable(ip))
		return (-EACCES);

	xva_init(&xva);
	xoap = xva_getxoptattr(&xva);

	XVA_SET_REQ(&xva, XAT_IMMUTABLE);
	if (ioctl_flags & FS_IMMUTABLE_FL)
		xoap->xoa_immutable = B_TRUE;

	XVA_SET_REQ(&xva, XAT_APPENDONLY);
	if (ioctl_flags & FS_APPEND_FL)
		xoap->xoa_appendonly = B_TRUE;

	XVA_SET_REQ(&xva, XAT_NODUMP);
	if (ioctl_flags & FS_NODUMP_FL)
		xoap->xoa_nodump = B_TRUE;

	crhold(cr);
	cookie = spl_fstrans_mark();
	error = -zfs_setattr(ip, (vattr_t *)&xva, 0, cr);
	spl_fstrans_unmark(cookie);
	crfree(cr);

	return (error);
}

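/*
 * FS_IOC_GETFLAGS and FS_IOC_SETFLAGS are the ioctls behind the lsattr(1)
 * and chattr(1) utilities, so e.g. 'chattr +i file' on a ZFS dataset ends
 * up in zpl_ioctl_setflags() above.
 */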
static long
zpl_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return (zpl_ioctl_getflags(filp, (void *)arg));
	case FS_IOC_SETFLAGS:
		return (zpl_ioctl_setflags(filp, (void *)arg));
	default:
		return (-ENOTTY);
	}
}

#ifdef CONFIG_COMPAT
static long
zpl_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return (zpl_ioctl(filp, cmd, arg));
}
#endif /* CONFIG_COMPAT */


const struct address_space_operations zpl_address_space_operations = {
	.readpages	= zpl_readpages,
	.readpage	= zpl_readpage,
	.writepage	= zpl_writepage,
	.writepages	= zpl_writepages,
};

const struct file_operations zpl_file_operations = {
	.open		= zpl_open,
	.release	= zpl_release,
	.llseek		= zpl_llseek,
	.read		= zpl_read,
	.write		= zpl_write,
#ifdef HAVE_VFS_RW_ITERATE
	.read_iter	= zpl_iter_read,
	.write_iter	= zpl_iter_write,
#else
	.aio_read	= zpl_aio_read,
	.aio_write	= zpl_aio_write,
#endif
	.mmap		= zpl_mmap,
	.fsync		= zpl_fsync,
	.aio_fsync	= zpl_aio_fsync,
#ifdef HAVE_FILE_FALLOCATE
	.fallocate	= zpl_fallocate,
#endif /* HAVE_FILE_FALLOCATE */
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};

const struct file_operations zpl_dir_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
#ifdef HAVE_VFS_ITERATE
	.iterate	= zpl_iterate,
#else
	.readdir	= zpl_readdir,
#endif
	.fsync		= zpl_fsync,
	.unlocked_ioctl	= zpl_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zpl_compat_ioctl,
#endif
};