 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.

/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs_opreg.h>
#include <sys/taskq.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu_objset.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include "fs/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
 * Each vnode op performs some logical unit of work.  To do this, the ZPL must
 * properly lock its in-core state, create a DMU transaction, do the work,
 * record this work in the intent log (ZIL), commit the DMU transaction,
 * and wait for the intent log to commit if it is a synchronous operation.
 * Moreover, the vnode ops must work in both normal and log replay context.
 * The ordering of events is important to avoid deadlocks and references
 * to freed memory.  The example below illustrates the following Big Rules:
 *
 *  (1) A check must be made in each zfs thread for a mounted file system.
 *      This is done avoiding races using ZFS_ENTER(zfsvfs).
 *      A ZFS_EXIT(zfsvfs) is needed before all returns.  Any znodes
 *      must be checked with ZFS_VERIFY_ZP(zp).  Both of these macros
 *      can return EIO from the calling function.
 *
 *  (2) iput() should always be the last thing except for zil_commit()
 *      (if necessary) and ZFS_EXIT(). This is for 3 reasons:
 *      First, if it's the last reference, the vnode/znode
 *      can be freed, so the zp may point to freed memory.  Second, the last
 *      reference will call zfs_zinactive(), which may induce a lot of work --
 *      pushing cached pages (which acquires range locks) and syncing out
 *      cached atime changes.  Third, zfs_zinactive() may require a new tx,
 *      which could deadlock the system if you were already holding one.
 *      If you must call iput() within a tx then use zfs_iput_async().
 *
 *  (3) All range locks must be grabbed before calling dmu_tx_assign(),
 *      as they can span dmu_tx_assign() calls.
 *
 *  (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
 *      dmu_tx_assign().  This is critical because we don't want to block
 *      while holding locks.
 *
 *      If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT.  This
 *      reduces lock contention and CPU usage when we must wait (note that if
 *      throughput is constrained by the storage, nearly every transaction
 *      must wait).
 *
 *      Note, in particular, that if a lock is sometimes acquired before
 *      the tx assigns, and sometimes after (e.g. z_lock), then failing
 *      to use a non-blocking assign can deadlock the system.  The scenario:
 *
 *      Thread A has grabbed a lock before calling dmu_tx_assign().
 *      Thread B is in an already-assigned tx, and blocks for this lock.
 *      Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
 *      forever, because the previous txg can't quiesce until B's tx commits.
 *
 *      If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
 *      then drop all locks, call dmu_tx_wait(), and try again.  On subsequent
 *      calls to dmu_tx_assign(), pass TXG_WAITED rather than TXG_NOWAIT,
 *      to indicate that this operation has already called dmu_tx_wait().
 *      This will ensure that we don't retry forever, waiting a short bit
 *      each time.
 *
 *  (5) If the operation succeeded, generate the intent log entry for it
 *      before dropping locks.  This ensures that the ordering of events
 *      in the intent log matches the order in which they actually occurred.
 *      During ZIL replay the zfs_log_* functions will update the sequence
 *      number to indicate the zil transaction has replayed.
 *
 *  (6) At the end of each vnode op, the DMU tx must always commit,
 *      regardless of whether there were any errors.
 *
 *  (7) After dropping all locks, invoke zil_commit(zilog, foid)
 *      to ensure that synchronous semantics are provided when necessary.
 *
 * In general, this is how things should be ordered in each vnode op:
 *
 *      ZFS_ENTER(zfsvfs);              // exit if unmounted
 * top:
 *      zfs_dirent_lock(&dl, ...)       // lock directory entry (may igrab())
 *      rw_enter(...);                  // grab any other locks you need
 *      tx = dmu_tx_create(...);        // get DMU tx
 *      dmu_tx_hold_*();                // hold each object you might modify
 *      error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
 *      if (error) {
 *              rw_exit(...);           // drop locks
 *              zfs_dirent_unlock(dl);  // unlock directory entry
 *              iput(...);              // release held vnodes
 *              if (error == ERESTART) {
 *                      waited = B_TRUE;
 *                      dmu_tx_wait(tx);
 *                      dmu_tx_abort(tx);
 *                      goto top;
 *              }
 *              dmu_tx_abort(tx);       // abort DMU tx
 *              ZFS_EXIT(zfsvfs);       // finished in zfs
 *              return (error);         // really out of space
 *      }
 *      error = do_real_work();         // do whatever this VOP does
 *      if (error == 0)
 *              zfs_log_*(...);         // on success, make ZIL entry
 *      dmu_tx_commit(tx);              // commit DMU tx -- error or not
 *      rw_exit(...);                   // drop locks
 *      zfs_dirent_unlock(dl);          // unlock directory entry
 *      iput(...);                      // release held vnodes
 *      zil_commit(zilog, foid);        // synchronous when necessary
 *      ZFS_EXIT(zfsvfs);               // finished in zfs
 *      return (error);                 // done, report error
 * Virus scanning is unsupported.  It would be possible to add a hook
 * here to perform the required virus scan.  This could be done
 * entirely in the kernel or potentially as an update to invoke a
 * scanning daemon.

zfs_vscan(struct inode *ip, cred_t *cr, int async)
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);

    /* Honor ZFS_APPENDONLY file attribute */
    if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
        ((flag & O_APPEND) == 0)) {
        return (SET_ERROR(EPERM));

    /* Virus scan eligible files on open */
    if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
        !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
        if (zfs_vscan(ip, cr, 0) != 0) {
            return (SET_ERROR(EACCES));

    /* Keep a count of the synchronous opens in the znode */
    atomic_inc_32(&zp->z_sync_cnt);
zfs_close(struct inode *ip, int flag, cred_t *cr)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);

    /* Decrement the synchronous opens in the znode */
    atomic_dec_32(&zp->z_sync_cnt);

    if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
        !(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
        VERIFY(zfs_vscan(ip, cr, 1) == 0);
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
 * Lseek support for finding holes (cmd == SEEK_HOLE) and
 * data (cmd == SEEK_DATA). "off" is an in/out parameter.
zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
    znode_t *zp = ITOZ(ip);
    uint64_t noff = (uint64_t)*off; /* new offset */

    file_sz = zp->z_size;
    if (noff >= file_sz) {
        return (SET_ERROR(ENXIO));

    if (cmd == SEEK_HOLE)

    error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
        return (SET_ERROR(ENXIO));

    /* file was dirty, so fall back to using generic logic */
    if (error == EBUSY) {

     * We could find a hole that begins after the logical end-of-file,
     * because dmu_offset_next() only works on whole blocks.  If the
     * EOF falls mid-block, then indicate that the "virtual hole"
     * at the end of the file begins at the logical EOF, rather than
     * at the end of the last block.
    if (noff > file_sz) {

zfs_holey(struct inode *ip, int cmd, loff_t *off)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);

    error = zfs_holey_common(ip, cmd, off);

#endif /* SEEK_HOLE && SEEK_DATA */
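
/*
 * Illustration only (not part of this file's build): the SEEK_HOLE/SEEK_DATA
 * support above is reached through lseek(2).  A minimal userspace sketch,
 * assuming a sparse test file at the hypothetical path "/tank/sparse":
 *
 *      #include <fcntl.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/tank/sparse", O_RDONLY);
 *      off_t data = lseek(fd, 0, SEEK_DATA);     // first data at or after 0
 *      off_t hole = lseek(fd, data, SEEK_HOLE);  // next hole after that data
 *      close(fd);
 *
 * As in zfs_holey_common() above, a request at or beyond EOF fails with
 * ENXIO, and the implicit hole at end-of-file begins at the logical EOF
 * rather than at the end of the last block.
 */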
 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Write:    If we find a memory mapped page, we write to *both*
 *              the page and the dmu buffer.
update_pages(struct inode *ip, int64_t start, int len,
    objset_t *os, uint64_t oid)
    struct address_space *mp = ip->i_mapping;

    off = start & (PAGE_SIZE-1);
    for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
        nbytes = MIN(PAGE_SIZE - off, len);

        pp = find_lock_page(mp, start >> PAGE_SHIFT);
            if (mapping_writably_mapped(mp))
                flush_dcache_page(pp);

            (void) dmu_read(os, oid, start+off, nbytes, pb+off,

            if (mapping_writably_mapped(mp))
                flush_dcache_page(pp);

            mark_page_accessed(pp);

 * When a file is memory mapped, we must keep the IO data synchronized
 * between the DMU cache and the memory mapped pages.  What this means:
 *
 * On Read:     We "read" preferentially from memory mapped pages,
 *              else we default from the dmu buffer.
 *
 * NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
 *       the file is memory mapped.
mappedread(struct inode *ip, int nbytes, uio_t *uio)
    struct address_space *mp = ip->i_mapping;
    znode_t *zp = ITOZ(ip);

    start = uio->uio_loffset;
    off = start & (PAGE_SIZE-1);
    for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
        bytes = MIN(PAGE_SIZE - off, len);

        pp = find_lock_page(mp, start >> PAGE_SHIFT);
            ASSERT(PageUptodate(pp));

            error = uiomove(pb + off, bytes, UIO_READ, uio);

            if (mapping_writably_mapped(mp))
                flush_dcache_page(pp);

            mark_page_accessed(pp);

            error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
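
/*
 * Illustration only (not part of this file's build): the coherence described
 * above means data written through write(2) is immediately visible through an
 * existing shared mapping, and vice versa.  A hedged userspace sketch,
 * assuming a test file at the hypothetical path "/tank/mapped" that is at
 * least one page long:
 *
 *      #include <assert.h>
 *      #include <fcntl.h>
 *      #include <string.h>
 *      #include <sys/mman.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/tank/mapped", O_RDWR);
 *      char *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *      (void) pwrite(fd, "zfs", 3, 0);         // goes through zfs_write()/update_pages()
 *      assert(memcmp(map, "zfs", 3) == 0);     // mapped page sees the new data
 *      map[0] = 'Z';                           // dirtied page is later written back
 *      munmap(map, 4096);
 *      close(fd);
 */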
unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
 * Read bytes from specified file into supplied buffer.
 *
 *      IN:     ip      - inode of file to be read from.
 *              uio     - structure supplying read location, range info,
 *              ioflag  - FSYNC flags; used to provide FRSYNC semantics.
 *                        O_DIRECT flag; used to bypass page cache.
 *              cr      - credentials of caller.
 *
 *      OUT:    uio     - updated offset and range, buffer filled.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *              inode - atime updated if byte count > 0
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);
#ifdef HAVE_UIO_ZEROCOPY
#endif /* HAVE_UIO_ZEROCOPY */

    if (zp->z_pflags & ZFS_AV_QUARANTINED) {
        return (SET_ERROR(EACCES));

     * Validate file offset
    if (uio->uio_loffset < (offset_t)0) {
        return (SET_ERROR(EINVAL));

     * Fasttrack empty reads
    if (uio->uio_resid == 0) {

     * If we're in FRSYNC mode, sync out this znode before reading it.
    if (ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zfsvfs->z_log, zp->z_id);

     * Lock the range against changes.
    rl = zfs_range_lock(&zp->z_range_lock, uio->uio_loffset, uio->uio_resid,

     * If we are reading past end-of-file we can skip
     * to the end; but we might still need to set atime.
    if (uio->uio_loffset >= zp->z_size) {

    ASSERT(uio->uio_loffset < zp->z_size);
    n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);

#ifdef HAVE_UIO_ZEROCOPY
    if ((uio->uio_extflg == UIO_XUIO) &&
        (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
        int blksz = zp->z_blksz;
        uint64_t offset = uio->uio_loffset;

        xuio = (xuio_t *)uio;
            nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
            ASSERT(offset + n <= blksz);
        (void) dmu_xuio_init(xuio, nblk);

        if (vn_has_cached_data(ip)) {
             * For simplicity, we always allocate a full buffer
             * even if we only expect to read a portion of a block.
            while (--nblk >= 0) {
                (void) dmu_xuio_add(xuio,
                    dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
#endif /* HAVE_UIO_ZEROCOPY */

        nbytes = MIN(n, zfs_read_chunk_size -
            P2PHASE(uio->uio_loffset, zfs_read_chunk_size));

        if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
            error = mappedread(ip, nbytes, uio);
            error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),

        /* convert checksum errors into IO errors */
            error = SET_ERROR(EIO);

    zfs_range_unlock(rl);
 * Write the bytes to a file.
 *
 *      IN:     ip      - inode of file to be written to.
 *              uio     - structure supplying write location, range info,
 *              ioflag  - FAPPEND flag set if in append mode.
 *                        O_DIRECT flag; used to bypass page cache.
 *              cr      - credentials of caller.
 *
 *      OUT:    uio     - updated offset and range.
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 *              ip - ctime|mtime updated if byte count > 0
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
    znode_t *zp = ITOZ(ip);
    rlim64_t limit = uio->uio_limit;
    ssize_t start_resid = uio->uio_resid;
    zfsvfs_t *zfsvfs = ZTOZSB(zp);
    int max_blksz = zfsvfs->z_max_blksz;
    const iovec_t *aiov = NULL;
    sa_bulk_attr_t bulk[4];
    uint64_t mtime[2], ctime[2];
#ifdef HAVE_UIO_ZEROCOPY
    const iovec_t *iovp = uio->uio_iov;
    ASSERTV(int iovcnt = uio->uio_iovcnt);

     * Fasttrack empty write
    if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)

    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,

     * Callers might not be able to detect properly that we are read-only,
     * so check it explicitly here.
    if (zfs_is_readonly(zfsvfs)) {
        return (SET_ERROR(EROFS));

     * If immutable or not appending then return EPERM
    if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
        ((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
        (uio->uio_loffset < zp->z_size))) {
        return (SET_ERROR(EPERM));

    zilog = zfsvfs->z_log;

     * Validate file offset
    woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
        return (SET_ERROR(EINVAL));

     * Pre-fault the pages to ensure slow (eg NFS) pages
     * Skip this if uio contains loaned arc_buf.
#ifdef HAVE_UIO_ZEROCOPY
    if ((uio->uio_extflg == UIO_XUIO) &&
        (((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
        xuio = (xuio_t *)uio;

        uio_prefaultpages(MIN(n, max_blksz), uio);

     * If in append mode, set the io offset pointer to eof.
    if (ioflag & FAPPEND) {
         * Obtain an appending range lock to guarantee file append
         * semantics.  We reset the write offset once we have the lock.
        rl = zfs_range_lock(&zp->z_range_lock, 0, n, RL_APPEND);
        if (rl->r_len == UINT64_MAX) {
             * We overlocked the file because this write will cause
             * the file block size to increase.
             * Note that zp_size cannot change with this lock held.
        uio->uio_loffset = woff;
         * Note that if the file block size will change as a result of
         * this write, then this range lock will lock the entire file
         * so that we can re-write the block safely.
        rl = zfs_range_lock(&zp->z_range_lock, woff, n, RL_WRITER);

        zfs_range_unlock(rl);
        return (SET_ERROR(EFBIG));

    if ((woff + n) > limit || woff > (limit - n))

    /* Will this write extend the file length? */
    write_eof = (woff + n > zp->z_size);

    end_size = MAX(zp->z_size, woff + n);

     * Write the file in reasonable size chunks.  Each chunk is written
     * in a separate transaction; this keeps the intent log records small
     * and allows us to do more fine-grained space accounting.
        woff = uio->uio_loffset;
        if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
            zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
                dmu_return_arcbuf(abuf);
            error = SET_ERROR(EDQUOT);

        if (xuio && abuf == NULL) {
#ifdef HAVE_UIO_ZEROCOPY
            ASSERT(i_iov < iovcnt);
            ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
            abuf = dmu_xuio_arcbuf(xuio, i_iov);
            dmu_xuio_clear(xuio, i_iov);
            ASSERT((aiov->iov_base == abuf->b_data) ||
                ((char *)aiov->iov_base - (char *)abuf->b_data +
                aiov->iov_len == arc_buf_size(abuf)));
        } else if (abuf == NULL && n >= max_blksz &&
            woff >= zp->z_size &&
            P2PHASE(woff, max_blksz) == 0 &&
            zp->z_blksz == max_blksz) {
             * This write covers a full block.  "Borrow" a buffer
             * from the dmu so that we can fill it before we enter
             * a transaction.  This avoids the possibility of
             * holding up the transaction if the data copy hangs
             * up on a pagefault (e.g., from an NFS server mapping).
            abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
            ASSERT(abuf != NULL);
            ASSERT(arc_buf_size(abuf) == max_blksz);
            if ((error = uiocopy(abuf->b_data, max_blksz,
                UIO_WRITE, uio, &cbytes))) {
                dmu_return_arcbuf(abuf);
            ASSERT(cbytes == max_blksz);

         * Start a transaction.
        tx = dmu_tx_create(zfsvfs->z_os);
        dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
        dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
        zfs_sa_upgrade_txholds(tx, zp);
        error = dmu_tx_assign(tx, TXG_WAIT);
                dmu_return_arcbuf(abuf);

         * If zfs_range_lock() over-locked we grow the blocksize
         * and then reduce the lock range.  This will only happen
         * on the first iteration since zfs_range_reduce() will
         * shrink down r_len to the appropriate size.
        if (rl->r_len == UINT64_MAX) {
            if (zp->z_blksz > max_blksz) {
                 * File's blocksize is already larger than the
                 * "recordsize" property.  Only let it grow to
                 * the next power of 2.
                ASSERT(!ISP2(zp->z_blksz));
                new_blksz = MIN(end_size,
                    1 << highbit64(zp->z_blksz));
                new_blksz = MIN(end_size, max_blksz);
            zfs_grow_blocksize(zp, new_blksz, tx);
            zfs_range_reduce(rl, woff, n);

         * XXX - should we really limit each write to z_max_blksz?
         * Perhaps we should use SPA_MAXBLOCKSIZE chunks?
        nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));

            tx_bytes = uio->uio_resid;
            error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
            tx_bytes -= uio->uio_resid;

            ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
             * If this is not a full block write, but we are
             * extending the file past EOF and this data starts
             * block-aligned, use assign_arcbuf().  Otherwise,
             * write via dmu_write().
            if (tx_bytes < max_blksz && (!write_eof ||
                aiov->iov_base != abuf->b_data)) {
                dmu_write(zfsvfs->z_os, zp->z_id, woff,
                    // cppcheck-suppress nullPointer
                    aiov->iov_len, aiov->iov_base, tx);
                dmu_return_arcbuf(abuf);
                xuio_stat_wbuf_copied();
                ASSERT(xuio || tx_bytes == max_blksz);
                dmu_assign_arcbuf(sa_get_db(zp->z_sa_hdl),
            ASSERT(tx_bytes <= uio->uio_resid);
            uioskip(uio, tx_bytes);

        if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
            update_pages(ip, woff,
                tx_bytes, zfsvfs->z_os, zp->z_id);

         * If we made no progress, we're done.  If we made even
         * partial progress, update the znode and ZIL accordingly.
            (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
                (void *)&zp->z_size, sizeof (uint64_t), tx);
         * Clear Set-UID/Set-GID bits on successful write if not
         * privileged and at least one of the execute bits is set.
         *
         * It would be nice to do this after all writes have
         * been done, but that would still expose the ISUID/ISGID
         * to another app after the partial write is committed.
         *
         * Note: we don't call zfs_fuid_map_id() here because
         * user 0 is not an ephemeral uid.
        mutex_enter(&zp->z_acl_lock);
        uid = KUID_TO_SUID(ip->i_uid);
        if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
            (S_IXUSR >> 6))) != 0 &&
            (zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
            secpolicy_vnode_setid_retain(cr,
            ((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
            zp->z_mode &= ~(S_ISUID | S_ISGID);
            ip->i_mode = newmode = zp->z_mode;
            (void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
                (void *)&newmode, sizeof (uint64_t), tx);
        mutex_exit(&zp->z_acl_lock);

        zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);

         * Update the file size (zp_size) if it has changed;
         * account for possible concurrent updates.
        while ((end_size = zp->z_size) < uio->uio_loffset) {
            (void) atomic_cas_64(&zp->z_size, end_size,

         * If we are replaying and eof is non zero then force
         * the file size to the specified eof. Note, there's no
         * concurrency during replay.
        if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
            zp->z_size = zfsvfs->z_replay_eof;

        error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);

        zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,

        ASSERT(tx_bytes == nbytes);

            uio_prefaultpages(MIN(n, max_blksz), uio);

    zfs_inode_update(zp);
    zfs_range_unlock(rl);

     * If we're in replay mode, or we made no progress, return error.
     * Otherwise, it's at least a partial write, so it's successful.
    if (zfsvfs->z_replay || uio->uio_resid == start_resid) {

    if (ioflag & (FSYNC | FDSYNC) ||
        zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zilog, zp->z_id);
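
/*
 * Illustration only (not part of this file's build): because zfs_write()
 * reports success for partial progress (see the start_resid check above),
 * a caller may observe short writes.  A hedged userspace sketch of handling
 * that with ordinary POSIX semantics:
 *
 *      size_t off = 0;
 *      while (off < len) {
 *              ssize_t n = write(fd, buf + off, len - off);
 *              if (n <= 0)
 *                      break;          // error or no progress; check errno
 *              off += n;               // account for a short write
 *      }
 */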
 * Drop a reference on the passed inode asynchronously. This ensures
 * that the caller will never drop the last reference on an inode in
 * the current context. Doing so while holding open a tx could result
 * in a deadlock if iput_final() re-enters the filesystem code.
zfs_iput_async(struct inode *ip)
    objset_t *os = ITOZSB(ip)->z_os;

    ASSERT(atomic_read(&ip->i_count) > 0);

    if (atomic_read(&ip->i_count) == 1)
        VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
            (task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
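
/*
 * Illustration only: per Big Rule (2) at the top of this file, code that is
 * still inside an assigned transaction must not drop what may be the last
 * inode reference directly.  A hedged sketch of the calling pattern
 * (zfs_get_done() below follows the same rule):
 *
 *      tx = dmu_tx_create(zfsvfs->z_os);
 *      ...
 *      error = dmu_tx_assign(tx, TXG_WAIT);
 *      ...
 *      zfs_iput_async(ZTOI(zp));       // safe: the final iput runs from a taskq
 *      dmu_tx_commit(tx);
 *
 * Calling iput() at that point instead could recurse into zfs_zinactive()
 * and deadlock while the txg is held open.
 */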
zfs_get_done(zgd_t *zgd, int error)
    znode_t *zp = zgd->zgd_private;

    dmu_buf_rele(zgd->zgd_db, zgd);

    zfs_range_unlock(zgd->zgd_rl);

     * Release the vnode asynchronously as we currently have the
     * txg stopped from syncing.
    zfs_iput_async(ZTOI(zp));

    if (error == 0 && zgd->zgd_bp)
        zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

    kmem_free(zgd, sizeof (zgd_t));

static int zil_fault_io = 0;
 * Get data to generate a TX_WRITE intent log record.
zfs_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
    zfsvfs_t *zfsvfs = arg;
    objset_t *os = zfsvfs->z_os;
    uint64_t object = lr->lr_foid;
    uint64_t offset = lr->lr_offset;
    uint64_t size = lr->lr_length;

    ASSERT(zio != NULL);

     * Nothing to do if the file has been removed
    if (zfs_zget(zfsvfs, object, &zp) != 0)
        return (SET_ERROR(ENOENT));
    if (zp->z_unlinked) {
         * Release the vnode asynchronously as we currently have the
         * txg stopped from syncing.
        zfs_iput_async(ZTOI(zp));
        return (SET_ERROR(ENOENT));

    zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
    zgd->zgd_zilog = zfsvfs->z_log;
    zgd->zgd_private = zp;

     * Write records come in two flavors: immediate and indirect.
     * For small writes it's cheaper to store the data with the
     * log record (immediate); for large writes it's cheaper to
     * sync the data and get a pointer to it (indirect) so that
     * we don't have to write the data twice.
    if (buf != NULL) { /* immediate write */
        zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset, size,

        /* test for truncation needs to be done while range locked */
        if (offset >= zp->z_size) {
            error = SET_ERROR(ENOENT);
            error = dmu_read(os, object, offset, size, buf,
                DMU_READ_NO_PREFETCH);
        ASSERT(error == 0 || error == ENOENT);
    } else { /* indirect write */
         * Have to lock the whole block to ensure when it's
         * written out and its checksum is being calculated
         * that no one can change the data. We need to re-check
         * blocksize after we get the lock in case it's changed!
        blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;

        zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset,
        if (zp->z_blksz == size)

        zfs_range_unlock(zgd->zgd_rl);

        /* test for truncation needs to be done while range locked */
        if (lr->lr_offset >= zp->z_size)
            error = SET_ERROR(ENOENT);

            error = SET_ERROR(EIO);

            error = dmu_buf_hold(os, object, offset, zgd, &db,
                DMU_READ_NO_PREFETCH);

            blkptr_t *bp = &lr->lr_blkptr;

            ASSERT(db->db_offset == offset);
            ASSERT(db->db_size == size);

            error = dmu_sync(zio, lr->lr_common.lrc_txg,

            ASSERT(error || lr->lr_length <= size);

             * On success, we need to wait for the write I/O
             * initiated by dmu_sync() to complete before we can
             * release this dbuf.  We will finish everything up
             * in the zfs_get_done() callback.
            if (error == EALREADY) {
                lr->lr_common.lrc_txtype = TX_WRITE2;

    zfs_get_done(zgd, error);
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);

    if (flag & V_ACE_MASK)
        error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
        error = zfs_zaccess_rwx(zp, mode, flag, cr);
 * Lookup an entry in a directory, or an extended attribute directory.
 * If it exists, return a held inode reference for it.
 *
 *      IN:     dip     - inode of directory to search.
 *              nm      - name of entry to lookup.
 *              flags   - LOOKUP_XATTR set if looking for an attribute.
 *              cr      - credentials of caller.
 *              direntflags - directory lookup flags
 *              realpnp - returned pathname.
 *
 *      OUT:    ipp     - inode of located entry, NULL if not found.
 *
 *      RETURN: 0 on success, error code on failure.
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
    cred_t *cr, int *direntflags, pathname_t *realpnp)
    znode_t *zdp = ITOZ(dip);
    zfsvfs_t *zfsvfs = ITOZSB(dip);

     * Fast path lookup, however we must skip DNLC lookup
     * for case folding or normalizing lookups because the
     * DNLC code only stores the passed in name.  This means
     * creating 'a' and removing 'A' on a case insensitive
     * file system would work, but DNLC still thinks 'a'
     * exists and won't let you create it again on the next
     * pass through fast path.
    if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
        if (!S_ISDIR(dip->i_mode)) {
            return (SET_ERROR(ENOTDIR));
        } else if (zdp->z_sa_hdl == NULL) {
            return (SET_ERROR(EIO));

        if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
            error = zfs_fastaccesschk_execute(zdp, cr);
        } else if (!zdp->z_zfsvfs->z_norm &&
            (zdp->z_zfsvfs->z_case == ZFS_CASE_SENSITIVE)) {
            vnode_t *tvp = dnlc_lookup(dvp, nm);
                error = zfs_fastaccesschk_execute(zdp, cr);
                if (tvp == DNLC_NO_VNODE) {
                    return (SET_ERROR(ENOENT));
                return (specvp_check(vpp, cr));
#endif /* HAVE_DNLC */

    if (flags & LOOKUP_XATTR) {
         * We don't allow recursive attributes..
         * Maybe someday we will.
        if (zdp->z_pflags & ZFS_XATTR) {
            return (SET_ERROR(EINVAL));

        if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {

         * Do we have permission to get into attribute directory?
        if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,

    if (!S_ISDIR(dip->i_mode)) {
        return (SET_ERROR(ENOTDIR));

     * Check accessibility of directory.
    if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {

    if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
        NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
        return (SET_ERROR(EILSEQ));

    error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
    if ((error == 0) && (*ipp))
        zfs_inode_update(ITOZ(*ipp));
 * Attempt to create a new entry in a directory.  If the entry
 * already exists, truncate the file if permissible, else return
 * an error.  Return the ip of the created or trunc'd file.
 *
 *      IN:     dip     - inode of directory to put new file entry in.
 *              name    - name of new file entry.
 *              vap     - attributes of new file.
 *              excl    - flag indicating exclusive or non-exclusive mode.
 *              mode    - mode to open file with.
 *              cr      - credentials of caller.
 *              flag    - large file flag [UNUSED].
 *              vsecp   - ACL to be set
 *
 *      OUT:    ipp     - inode of created or trunc'd entry.
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *              dip - ctime|mtime updated if new entry created
 *              ip - ctime|mtime always, atime if new
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
    znode_t *zp, *dzp = ITOZ(dip);
    zfsvfs_t *zfsvfs = ITOZSB(dip);
    zfs_acl_ids_t acl_ids;
    boolean_t fuid_dirtied;
    boolean_t have_acl = B_FALSE;
    boolean_t waited = B_FALSE;

     * If we have an ephemeral id, ACL, or XVATTR then
     * make sure file system is at proper version
    if (zfsvfs->z_use_fuids == B_FALSE &&
        (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
        return (SET_ERROR(EINVAL));

        return (SET_ERROR(EINVAL));

    zilog = zfsvfs->z_log;

    if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
        NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
        return (SET_ERROR(EILSEQ));

    if (vap->va_mask & ATTR_XVATTR) {
        if ((error = secpolicy_xvattr((xvattr_t *)vap,
            crgetuid(cr), cr, vap->va_mode)) != 0) {

    if (*name == '\0') {
         * Null component name refers to the directory itself.

    /* possible igrab(zp) */
    if (flag & FIGNORECASE)

    error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
        zfs_acl_ids_free(&acl_ids);
        if (strcmp(name, "..") == 0)
            error = SET_ERROR(EISDIR);

     * Create a new file object and update the directory
    if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
        zfs_acl_ids_free(&acl_ids);

     * We only support the creation of regular files in
     * extended attribute directories.
    if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
        zfs_acl_ids_free(&acl_ids);
        error = SET_ERROR(EINVAL);

    if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
        cr, vsecp, &acl_ids)) != 0)

    if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
        zfs_acl_ids_free(&acl_ids);
        error = SET_ERROR(EDQUOT);

    tx = dmu_tx_create(os);
    dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
        ZFS_SA_BASE_ATTR_SIZE);
    fuid_dirtied = zfsvfs->z_fuid_dirty;
        zfs_fuid_txhold(zfsvfs, tx);
    dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
    dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
    if (!zfsvfs->z_use_sa &&
        acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
        dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
            0, acl_ids.z_aclp->z_acl_bytes);
    error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
        zfs_dirent_unlock(dl);
        if (error == ERESTART) {
        zfs_acl_ids_free(&acl_ids);

    zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
        zfs_fuid_sync(zfsvfs, tx);
    (void) zfs_link_create(dl, zp, tx, ZNEW);
    txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
    if (flag & FIGNORECASE)
    zfs_log_create(zilog, tx, txtype, dzp, zp, name,
        vsecp, acl_ids.z_fuidp, vap);
    zfs_acl_ids_free(&acl_ids);

        int aflags = (flag & FAPPEND) ? V_APPEND : 0;
            zfs_acl_ids_free(&acl_ids);

         * A directory entry already exists for this name.
         * Can't truncate an existing file if in exclusive mode.
            error = SET_ERROR(EEXIST);
         * Can't open a directory for writing.
        if (S_ISDIR(ZTOI(zp)->i_mode)) {
            error = SET_ERROR(EISDIR);
         * Verify requested access to file.
        if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {

        mutex_enter(&dzp->z_lock);
        mutex_exit(&dzp->z_lock);

         * Truncate regular files if requested.
        if (S_ISREG(ZTOI(zp)->i_mode) &&
            (vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
            /* we can't hold any locks when calling zfs_freesp() */
            zfs_dirent_unlock(dl);
            error = zfs_freesp(zp, 0, 0, mode, TRUE);

        zfs_dirent_unlock(dl);

    zfs_inode_update(dzp);
    zfs_inode_update(zp);

    if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zilog, 0);
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
    int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
    znode_t *zp = NULL, *dzp = ITOZ(dip);
    zfsvfs_t *zfsvfs = ITOZSB(dip);
    zfs_acl_ids_t acl_ids;
    boolean_t fuid_dirtied;
    boolean_t have_acl = B_FALSE;
    boolean_t waited = B_FALSE;

     * If we have an ephemeral id, ACL, or XVATTR then
     * make sure file system is at proper version
    if (zfsvfs->z_use_fuids == B_FALSE &&
        (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
        return (SET_ERROR(EINVAL));

    if (vap->va_mask & ATTR_XVATTR) {
        if ((error = secpolicy_xvattr((xvattr_t *)vap,
            crgetuid(cr), cr, vap->va_mode)) != 0) {

     * Create a new file object and update the directory
    if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
        zfs_acl_ids_free(&acl_ids);

    if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
        cr, vsecp, &acl_ids)) != 0)

    if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
        zfs_acl_ids_free(&acl_ids);
        error = SET_ERROR(EDQUOT);

    tx = dmu_tx_create(os);
    dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
        ZFS_SA_BASE_ATTR_SIZE);
    dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
    fuid_dirtied = zfsvfs->z_fuid_dirty;
        zfs_fuid_txhold(zfsvfs, tx);
    if (!zfsvfs->z_use_sa &&
        acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
        dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
            0, acl_ids.z_aclp->z_acl_bytes);
    error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
        if (error == ERESTART) {
        zfs_acl_ids_free(&acl_ids);

    zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);
        zfs_fuid_sync(zfsvfs, tx);

    /* Add to unlinked set */
    zfs_unlinked_add(zp, tx);
    zfs_acl_ids_free(&acl_ids);

    zfs_inode_update(dzp);
    zfs_inode_update(zp);
 * Remove an entry from a directory.
 *
 *      IN:     dip     - inode of directory to remove entry from.
 *              name    - name of entry to remove.
 *              cr      - credentials of caller.
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 *              ip - ctime (if nlink > 0)

uint64_t null_xattr = 0;
zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags)
    znode_t *zp, *dzp = ITOZ(dip);
    zfsvfs_t *zfsvfs = ITOZSB(dip);
    uint64_t acl_obj, xattr_obj;
    uint64_t xattr_obj_unlinked = 0;
    boolean_t may_delete_now, delete_now = FALSE;
    boolean_t unlinked, toobig = FALSE;
    pathname_t *realnmp = NULL;
    boolean_t waited = B_FALSE;

        return (SET_ERROR(EINVAL));

    zilog = zfsvfs->z_log;

    if (flags & FIGNORECASE) {

     * Attempt to lock directory; fail if entry doesn't exist.
    if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,

    if ((error = zfs_zaccess_delete(dzp, zp, cr))) {

     * Need to use rmdir for removing directories.
    if (S_ISDIR(ip->i_mode)) {
        error = SET_ERROR(EPERM);

        dnlc_remove(dvp, realnmp->pn_buf);
        dnlc_remove(dvp, name);
#endif /* HAVE_DNLC */

    mutex_enter(&zp->z_lock);
    may_delete_now = atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped);
    mutex_exit(&zp->z_lock);

     * We may delete the znode now, or we may put it in the unlinked set;
     * it depends on whether we're the last link, and on whether there are
     * other holds on the inode.  So we dmu_tx_hold() the right things to
     * allow for either case.
    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
    zfs_sa_upgrade_txholds(tx, zp);
    zfs_sa_upgrade_txholds(tx, dzp);
    if (may_delete_now) {
        toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
        /* if the file is too big, only hold_free a token amount */
        dmu_tx_hold_free(tx, zp->z_id, 0,
            (toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));

    /* are there any extended attributes? */
    error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
        &xattr_obj, sizeof (xattr_obj));
    if (error == 0 && xattr_obj) {
        error = zfs_zget(zfsvfs, xattr_obj, &xzp);
        dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
        dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);

    mutex_enter(&zp->z_lock);
    if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
        dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
    mutex_exit(&zp->z_lock);

    /* charge as an update -- would be nice not to charge at all */
    dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

     * Mark this transaction as typically resulting in a net free of space
    dmu_tx_mark_netfree(tx);

    error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
        zfs_dirent_unlock(dl);
        if (error == ERESTART) {

     * Remove the directory entry.
    error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);

         * Hold z_lock so that we can make sure that the ACL obj
         * hasn't changed.  Could have been deleted due to
        mutex_enter(&zp->z_lock);
        (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
            &xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
        delete_now = may_delete_now && !toobig &&
            atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) &&
            xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==

        if (xattr_obj_unlinked) {
            ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
            mutex_enter(&xzp->z_lock);
            xzp->z_unlinked = 1;
            clear_nlink(ZTOI(xzp));
            error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
                &links, sizeof (links), tx);
            ASSERT3U(error, ==, 0);
            mutex_exit(&xzp->z_lock);
            zfs_unlinked_add(xzp, tx);

            error = sa_remove(zp->z_sa_hdl,
                SA_ZPL_XATTR(zfsvfs), tx);
            error = sa_update(zp->z_sa_hdl,
                SA_ZPL_XATTR(zfsvfs), &null_xattr,
                sizeof (uint64_t), tx);

         * Add to the unlinked set because a new reference could be
         * taken concurrently resulting in a deferred destruction.
        zfs_unlinked_add(zp, tx);
        mutex_exit(&zp->z_lock);
    } else if (unlinked) {
        mutex_exit(&zp->z_lock);
        zfs_unlinked_add(zp, tx);

    if (flags & FIGNORECASE)
    zfs_log_remove(zilog, tx, txtype, dzp, name, obj);

    zfs_dirent_unlock(dl);
    zfs_inode_update(dzp);
    zfs_inode_update(zp);

        zfs_inode_update(xzp);
        zfs_iput_async(ZTOI(xzp));

    if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zilog, 0);
 * Create a new directory and insert it into dip using the name
 * provided.  Return a pointer to the inserted directory.
 *
 *      IN:     dip     - inode of directory to add subdir to.
 *              dirname - name of new directory.
 *              vap     - attributes of new directory.
 *              cr      - credentials of caller.
 *              vsecp   - ACL to be set
 *
 *      OUT:    ipp     - inode of created directory.
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 *              dip - ctime|mtime updated
 *              ipp - ctime|mtime|atime updated
zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
    cred_t *cr, int flags, vsecattr_t *vsecp)
    znode_t *zp, *dzp = ITOZ(dip);
    zfsvfs_t *zfsvfs = ITOZSB(dip);
    gid_t gid = crgetgid(cr);
    zfs_acl_ids_t acl_ids;
    boolean_t fuid_dirtied;
    boolean_t waited = B_FALSE;

    ASSERT(S_ISDIR(vap->va_mode));

     * If we have an ephemeral id, ACL, or XVATTR then
     * make sure file system is at proper version
    if (zfsvfs->z_use_fuids == B_FALSE &&
        (vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
        return (SET_ERROR(EINVAL));

    if (dirname == NULL)
        return (SET_ERROR(EINVAL));

    zilog = zfsvfs->z_log;

    if (dzp->z_pflags & ZFS_XATTR) {
        return (SET_ERROR(EINVAL));

    if (zfsvfs->z_utf8 && u8_validate(dirname,
        strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
        return (SET_ERROR(EILSEQ));

    if (flags & FIGNORECASE)

    if (vap->va_mask & ATTR_XVATTR) {
        if ((error = secpolicy_xvattr((xvattr_t *)vap,
            crgetuid(cr), cr, vap->va_mode)) != 0) {

    if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
        vsecp, &acl_ids)) != 0) {

     * First make sure the new directory doesn't exist.
     * Existence is checked first to make sure we don't return
     * EACCES instead of EEXIST which can cause some applications
    if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
        zfs_acl_ids_free(&acl_ids);

    if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
        zfs_acl_ids_free(&acl_ids);
        zfs_dirent_unlock(dl);

    if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
        zfs_acl_ids_free(&acl_ids);
        zfs_dirent_unlock(dl);
        return (SET_ERROR(EDQUOT));

     * Add a new entry to the directory.
    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
    dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
    fuid_dirtied = zfsvfs->z_fuid_dirty;
        zfs_fuid_txhold(zfsvfs, tx);
    if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
        dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
            acl_ids.z_aclp->z_acl_bytes);

    dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
        ZFS_SA_BASE_ATTR_SIZE);

    error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
        zfs_dirent_unlock(dl);
        if (error == ERESTART) {
        zfs_acl_ids_free(&acl_ids);

    zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

        zfs_fuid_sync(zfsvfs, tx);

     * Now put new name in parent dir.
    (void) zfs_link_create(dl, zp, tx, ZNEW);

    txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
    if (flags & FIGNORECASE)
    zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
        acl_ids.z_fuidp, vap);

    zfs_acl_ids_free(&acl_ids);

    zfs_dirent_unlock(dl);

    if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zilog, 0);

    zfs_inode_update(dzp);
    zfs_inode_update(zp);
 * Remove a directory subdir entry.  If the current working
 * directory is the same as the subdir to be removed, the remove will fail.
 *
 *      IN:     dip     - inode of directory to remove from.
 *              name    - name of directory to be removed.
 *              cwd     - inode of current working directory.
 *              cr      - credentials of caller.
 *              flags   - case flags
 *
 *      RETURN: 0 on success, error code on failure.
 *
 *              dip - ctime|mtime updated
zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
    znode_t *dzp = ITOZ(dip);
    zfsvfs_t *zfsvfs = ITOZSB(dip);
    boolean_t waited = B_FALSE;

        return (SET_ERROR(EINVAL));

    zilog = zfsvfs->z_log;

    if (flags & FIGNORECASE)

     * Attempt to lock directory; fail if entry doesn't exist.
    if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,

    if ((error = zfs_zaccess_delete(dzp, zp, cr))) {

    if (!S_ISDIR(ip->i_mode)) {
        error = SET_ERROR(ENOTDIR);

        error = SET_ERROR(EINVAL);

     * Grab a lock on the directory to make sure that no one is
     * trying to add (or lookup) entries while we are removing it.
    rw_enter(&zp->z_name_lock, RW_WRITER);

     * Grab a lock on the parent pointer to make sure we play well
     * with the treewalk and directory rename code.
    rw_enter(&zp->z_parent_lock, RW_WRITER);

    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
    dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
    zfs_sa_upgrade_txholds(tx, zp);
    zfs_sa_upgrade_txholds(tx, dzp);
    dmu_tx_mark_netfree(tx);
    error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
        rw_exit(&zp->z_parent_lock);
        rw_exit(&zp->z_name_lock);
        zfs_dirent_unlock(dl);
        if (error == ERESTART) {

    error = zfs_link_destroy(dl, zp, tx, zflg, NULL);

        uint64_t txtype = TX_RMDIR;
        if (flags & FIGNORECASE)
        zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);

    rw_exit(&zp->z_parent_lock);
    rw_exit(&zp->z_name_lock);

    zfs_dirent_unlock(dl);

    zfs_inode_update(dzp);
    zfs_inode_update(zp);

    if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zilog, 0);
 * Read as many directory entries as will fit into the provided
 * dirent buffer from the given directory cursor position.
 *
 *      IN:     ip      - inode of directory to read.
 *              dirent  - buffer for directory entries.
 *
 *      OUT:    dirent  - filler buffer of directory entries.
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 *              ip - atime updated
 *
 * Note that the low 4 bits of the cookie returned by zap is always zero.
 * This allows us to use the low range for "special" directory entries:
 * We use 0 for '.', and 1 for '..'.  If this is the root of the filesystem,
 * we use the offset 2 for the '.zfs' directory.
zfs_readdir(struct inode *ip, struct dir_context *ctx, cred_t *cr)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);
    zap_attribute_t zap;
    uint64_t offset; /* must be unsigned; checks for < 1 */

    if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
        &parent, sizeof (parent))) != 0)

     * Quit if directory has been removed (posix)

    prefetch = zp->z_zn_prefetch;

     * Initialize the iterator cursor.
         * Start iteration from the beginning of the directory.
        zap_cursor_init(&zc, os, zp->z_id);
         * The offset is a serialized cursor.
        zap_cursor_init_serialized(&zc, os, zp->z_id, offset);

     * Transform to file-system independent format
         * Special case `.', `..', and `.zfs'.
            (void) strcpy(zap.za_name, ".");
            zap.za_normalization_conflict = 0;
        } else if (offset == 1) {
            (void) strcpy(zap.za_name, "..");
            zap.za_normalization_conflict = 0;
        } else if (offset == 2 && zfs_show_ctldir(zp)) {
            (void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
            zap.za_normalization_conflict = 0;
            objnum = ZFSCTL_INO_ROOT;

            if ((error = zap_cursor_retrieve(&zc, &zap))) {
                if (error == ENOENT)

             * Allow multiple entries provided the first entry is
             * the object id.  Non-zpl consumers may safely make
             * use of the additional space.
             *
             * XXX: This should be a feature flag for compatibility
            if (zap.za_integer_length != 8 ||
                zap.za_num_integers == 0) {
                cmn_err(CE_WARN, "zap_readdir: bad directory "
                    "entry, obj = %lld, offset = %lld, "
                    "length = %d, num = %lld\n",
                    (u_longlong_t)zp->z_id,
                    (u_longlong_t)offset,
                    zap.za_integer_length,
                    (u_longlong_t)zap.za_num_integers);
                error = SET_ERROR(ENXIO);

            objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
            type = ZFS_DIRENT_TYPE(zap.za_first_integer);

        done = !dir_emit(ctx, zap.za_name, strlen(zap.za_name),

        /* Prefetch znode */
            dmu_prefetch(os, objnum, 0, 0, 0,
                ZIO_PRIORITY_SYNC_READ);

         * Move to the next entry, fill in the previous offset.
        if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
            zap_cursor_advance(&zc);
            offset = zap_cursor_serialize(&zc);

    zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */

    zap_cursor_fini(&zc);
    if (error == ENOENT)
ulong_t zfs_fsync_sync_cnt = 4;

zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);

    (void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);

    if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
        zil_commit(zfsvfs->z_log, zp->z_id);

    tsd_set(zfs_fsyncer_key, NULL);
 * Get the requested file attributes and place them in the provided
 * vattr structure.
 *
 *      IN:     ip      - inode of file.
 *              vap     - va_mask identifies requested attributes.
 *                        If ATTR_XVATTR set, then optional attrs are requested
 *              flags   - ATTR_NOACLCHECK (CIFS server context)
 *              cr      - credentials of caller.
 *
 *      OUT:    vap     - attribute values.
 *
 *      RETURN: 0 (always succeeds)
zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);
    uint64_t atime[2], mtime[2], ctime[2];
    xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
    xoptattr_t *xoap = NULL;
    boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
    sa_bulk_attr_t bulk[3];

    zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);

    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
    SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);

    if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {

     * If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
     * Also, if we are the owner don't bother, since owner should
     * always be allowed to read basic attributes of file.
    if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
        (vap->va_uid != crgetuid(cr))) {
        if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,

     * Return all attributes.  It's cheaper to provide the answer
     * than to determine whether we were asked the question.
    mutex_enter(&zp->z_lock);
    vap->va_type = vn_mode_to_vtype(zp->z_mode);
    vap->va_mode = zp->z_mode;
    vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
    vap->va_nodeid = zp->z_id;
    if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
        links = ZTOI(zp)->i_nlink + 1;
        links = ZTOI(zp)->i_nlink;
    vap->va_nlink = MIN(links, ZFS_LINK_MAX);
    vap->va_size = i_size_read(ip);
    vap->va_rdev = ip->i_rdev;
    vap->va_seq = ip->i_generation;

     * Add in any requested optional attributes and the create time.
     * Also set the corresponding bits in the returned attribute bitmap.
    if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
        if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
                ((zp->z_pflags & ZFS_ARCHIVE) != 0);
            XVA_SET_RTN(xvap, XAT_ARCHIVE);

        if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
            xoap->xoa_readonly =
                ((zp->z_pflags & ZFS_READONLY) != 0);
            XVA_SET_RTN(xvap, XAT_READONLY);

        if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
                ((zp->z_pflags & ZFS_SYSTEM) != 0);
            XVA_SET_RTN(xvap, XAT_SYSTEM);

        if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
                ((zp->z_pflags & ZFS_HIDDEN) != 0);
            XVA_SET_RTN(xvap, XAT_HIDDEN);

        if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
            xoap->xoa_nounlink =
                ((zp->z_pflags & ZFS_NOUNLINK) != 0);
            XVA_SET_RTN(xvap, XAT_NOUNLINK);

        if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
            xoap->xoa_immutable =
                ((zp->z_pflags & ZFS_IMMUTABLE) != 0);
            XVA_SET_RTN(xvap, XAT_IMMUTABLE);

        if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
            xoap->xoa_appendonly =
                ((zp->z_pflags & ZFS_APPENDONLY) != 0);
            XVA_SET_RTN(xvap, XAT_APPENDONLY);

        if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
                ((zp->z_pflags & ZFS_NODUMP) != 0);
            XVA_SET_RTN(xvap, XAT_NODUMP);

        if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
                ((zp->z_pflags & ZFS_OPAQUE) != 0);
            XVA_SET_RTN(xvap, XAT_OPAQUE);

        if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
            xoap->xoa_av_quarantined =
                ((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
            XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);

        if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
            xoap->xoa_av_modified =
                ((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
            XVA_SET_RTN(xvap, XAT_AV_MODIFIED);

        if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
            S_ISREG(ip->i_mode)) {
            zfs_sa_get_scanstamp(zp, xvap);

        if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
            (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
                times, sizeof (times));
            ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
            XVA_SET_RTN(xvap, XAT_CREATETIME);

        if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
            xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
            XVA_SET_RTN(xvap, XAT_REPARSE);

        if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
            xoap->xoa_generation = ip->i_generation;
            XVA_SET_RTN(xvap, XAT_GEN);

        if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
                ((zp->z_pflags & ZFS_OFFLINE) != 0);
            XVA_SET_RTN(xvap, XAT_OFFLINE);

        if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
                ((zp->z_pflags & ZFS_SPARSE) != 0);
            XVA_SET_RTN(xvap, XAT_SPARSE);

    ZFS_TIME_DECODE(&vap->va_atime, atime);
    ZFS_TIME_DECODE(&vap->va_mtime, mtime);
    ZFS_TIME_DECODE(&vap->va_ctime, ctime);

    mutex_exit(&zp->z_lock);

    sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);

    if (zp->z_blksz == 0) {
         * Block size hasn't been set; suggest maximal I/O transfers.
        vap->va_blksize = zfsvfs->z_max_blksz;
 * Get the basic file attributes and place them in the provided kstat
 * structure.  The inode is assumed to be the authoritative source
 * for most of the attributes.  However, the znode currently has the
 * authoritative atime, blksize, and block count.
 *
 *      IN:     ip      - inode of file.
 *
 *      OUT:    sp      - kstat values.
 *
 *      RETURN: 0 (always succeeds)
zfs_getattr_fast(struct inode *ip, struct kstat *sp)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);
    u_longlong_t nblocks;

    mutex_enter(&zp->z_lock);

    generic_fillattr(ip, sp);

    sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
    sp->blksize = blksize;
    sp->blocks = nblocks;

    if (unlikely(zp->z_blksz == 0)) {
         * Block size hasn't been set; suggest maximal I/O transfers.
        sp->blksize = zfsvfs->z_max_blksz;

    mutex_exit(&zp->z_lock);

     * Required to prevent NFS client from detecting different inode
     * numbers of snapshot root dentry before and after snapshot mount.
    if (zfsvfs->z_issnap) {
        if (ip->i_sb->s_root->d_inode == ip)
            sp->ino = ZFSCTL_INO_SNAPDIRS -
                dmu_objset_id(zfsvfs->z_os);
 * Set the file attributes to the values contained in the
 * vattr structure.
 *
 *      IN:     ip      - inode of file to be modified.
 *              vap     - new attribute values.
 *                        If ATTR_XVATTR set, then optional attrs are being set
 *              flags   - ATTR_UTIME set if non-default time values provided.
 *                      - ATTR_NOACLCHECK (CIFS context only).
 *              cr      - credentials of caller.
 *
 *      RETURN: 0 if success
 *              error code if failure
 *
 *              ip - ctime updated, mtime updated if size changed.
zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
    znode_t *zp = ITOZ(ip);
    zfsvfs_t *zfsvfs = ITOZSB(ip);
    xvattr_t *tmpxvattr;
    uint_t mask = vap->va_mask;
    uint_t saved_mask = 0;
    uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
    uint64_t mtime[2], ctime[2], atime[2];
    int need_policy = FALSE;
    zfs_fuid_info_t *fuidp = NULL;
    xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
    boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
    boolean_t fuid_dirtied = B_FALSE;
    sa_bulk_attr_t *bulk, *xattr_bulk;
    int count = 0, xattr_count = 0;

    zilog = zfsvfs->z_log;

     * Make sure that if we have ephemeral uid/gid or xvattr specified
     * that file system is at proper version level
    if (zfsvfs->z_use_fuids == B_FALSE &&
        (((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
        ((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
        (mask & ATTR_XVATTR))) {
        return (SET_ERROR(EINVAL));

    if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
        return (SET_ERROR(EISDIR));

    if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
        return (SET_ERROR(EINVAL));

     * If this is an xvattr_t, then get a pointer to the structure of
     * optional attributes.  If this is NULL, then we have a vattr_t.
    xoap = xva_getxoptattr(xvap);

    tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
    xva_init(tmpxvattr);

    bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);
    xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * 7, KM_SLEEP);

     * Immutable files can only alter immutable bit and atime
    if ((zp->z_pflags & ZFS_IMMUTABLE) &&
        ((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
        ((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
        err = SET_ERROR(EPERM);

    if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
        err = SET_ERROR(EPERM);
2748 * Verify timestamps doesn't overflow 32 bits.
2749 * ZFS can handle large timestamps, but 32bit syscalls can't
2750 * handle times greater than 2039. This check should be removed
2751 * once large timestamps are fully supported.
2753 if (mask
& (ATTR_ATIME
| ATTR_MTIME
)) {
2754 if (((mask
& ATTR_ATIME
) &&
2755 TIMESPEC_OVERFLOW(&vap
->va_atime
)) ||
2756 ((mask
& ATTR_MTIME
) &&
2757 TIMESPEC_OVERFLOW(&vap
->va_mtime
))) {
2758 err
= SET_ERROR(EOVERFLOW
);
2767 /* Can this be moved to before the top label? */
2768 if (zfs_is_readonly(zfsvfs
)) {
2769 err
= SET_ERROR(EROFS
);
2774 * First validate permissions
2777 if (mask
& ATTR_SIZE
) {
2778 err
= zfs_zaccess(zp
, ACE_WRITE_DATA
, 0, skipaclchk
, cr
);
2783 * XXX - Note, we are not providing any open
2784 * mode flags here (like FNDELAY), so we may
2785 * block if there are locks present... this
2786 * should be addressed in openat().
2788 /* XXX - would it be OK to generate a log record here? */
2789 err
= zfs_freesp(zp
, vap
->va_size
, 0, 0, FALSE
);
2794 if (mask
& (ATTR_ATIME
|ATTR_MTIME
) ||
2795 ((mask
& ATTR_XVATTR
) && (XVA_ISSET_REQ(xvap
, XAT_HIDDEN
) ||
2796 XVA_ISSET_REQ(xvap
, XAT_READONLY
) ||
2797 XVA_ISSET_REQ(xvap
, XAT_ARCHIVE
) ||
2798 XVA_ISSET_REQ(xvap
, XAT_OFFLINE
) ||
2799 XVA_ISSET_REQ(xvap
, XAT_SPARSE
) ||
2800 XVA_ISSET_REQ(xvap
, XAT_CREATETIME
) ||
2801 XVA_ISSET_REQ(xvap
, XAT_SYSTEM
)))) {
2802 need_policy
= zfs_zaccess(zp
, ACE_WRITE_ATTRIBUTES
, 0,
2806 if (mask
& (ATTR_UID
|ATTR_GID
)) {
2807 int idmask
= (mask
& (ATTR_UID
|ATTR_GID
));
2812 * NOTE: even if a new mode is being set,
2813 * we may clear S_ISUID/S_ISGID bits.
2816 if (!(mask
& ATTR_MODE
))
2817 vap
->va_mode
= zp
->z_mode
;
2820 * Take ownership or chgrp to group we are a member of
2823 take_owner
= (mask
& ATTR_UID
) && (vap
->va_uid
== crgetuid(cr
));
2824 take_group
= (mask
& ATTR_GID
) &&
2825 zfs_groupmember(zfsvfs
, vap
->va_gid
, cr
);
2828 * If both ATTR_UID and ATTR_GID are set then take_owner and
2829 * take_group must both be set in order to allow taking
2832 * Otherwise, send the check through secpolicy_vnode_setattr()
2836 if (((idmask
== (ATTR_UID
|ATTR_GID
)) &&
2837 take_owner
&& take_group
) ||
2838 ((idmask
== ATTR_UID
) && take_owner
) ||
2839 ((idmask
== ATTR_GID
) && take_group
)) {
2840 if (zfs_zaccess(zp
, ACE_WRITE_OWNER
, 0,
2841 skipaclchk
, cr
) == 0) {
2843 * Remove setuid/setgid for non-privileged users
2845 (void) secpolicy_setid_clear(vap
, cr
);
2846 trim_mask
= (mask
& (ATTR_UID
|ATTR_GID
));
2855 mutex_enter(&zp
->z_lock
);
2856 oldva
.va_mode
= zp
->z_mode
;
2857 zfs_fuid_map_ids(zp
, cr
, &oldva
.va_uid
, &oldva
.va_gid
);
2858 if (mask
& ATTR_XVATTR
) {
2860 * Update xvattr mask to include only those attributes
2861 * that are actually changing.
2863 * the bits will be restored prior to actually setting
2864 * the attributes so the caller thinks they were set.
2866 if (XVA_ISSET_REQ(xvap
, XAT_APPENDONLY
)) {
2867 if (xoap
->xoa_appendonly
!=
2868 ((zp
->z_pflags
& ZFS_APPENDONLY
) != 0)) {
2871 XVA_CLR_REQ(xvap
, XAT_APPENDONLY
);
2872 XVA_SET_REQ(tmpxvattr
, XAT_APPENDONLY
);
2876 if (XVA_ISSET_REQ(xvap
, XAT_NOUNLINK
)) {
2877 if (xoap
->xoa_nounlink
!=
2878 ((zp
->z_pflags
& ZFS_NOUNLINK
) != 0)) {
2881 XVA_CLR_REQ(xvap
, XAT_NOUNLINK
);
2882 XVA_SET_REQ(tmpxvattr
, XAT_NOUNLINK
);
2886 if (XVA_ISSET_REQ(xvap
, XAT_IMMUTABLE
)) {
2887 if (xoap
->xoa_immutable
!=
2888 ((zp
->z_pflags
& ZFS_IMMUTABLE
) != 0)) {
2891 XVA_CLR_REQ(xvap
, XAT_IMMUTABLE
);
2892 XVA_SET_REQ(tmpxvattr
, XAT_IMMUTABLE
);
2896 if (XVA_ISSET_REQ(xvap
, XAT_NODUMP
)) {
2897 if (xoap
->xoa_nodump
!=
2898 ((zp
->z_pflags
& ZFS_NODUMP
) != 0)) {
2901 XVA_CLR_REQ(xvap
, XAT_NODUMP
);
2902 XVA_SET_REQ(tmpxvattr
, XAT_NODUMP
);
2906 if (XVA_ISSET_REQ(xvap
, XAT_AV_MODIFIED
)) {
2907 if (xoap
->xoa_av_modified
!=
2908 ((zp
->z_pflags
& ZFS_AV_MODIFIED
) != 0)) {
2911 XVA_CLR_REQ(xvap
, XAT_AV_MODIFIED
);
2912 XVA_SET_REQ(tmpxvattr
, XAT_AV_MODIFIED
);
2916 if (XVA_ISSET_REQ(xvap
, XAT_AV_QUARANTINED
)) {
2917 if ((!S_ISREG(ip
->i_mode
) &&
2918 xoap
->xoa_av_quarantined
) ||
2919 xoap
->xoa_av_quarantined
!=
2920 ((zp
->z_pflags
& ZFS_AV_QUARANTINED
) != 0)) {
2923 XVA_CLR_REQ(xvap
, XAT_AV_QUARANTINED
);
2924 XVA_SET_REQ(tmpxvattr
, XAT_AV_QUARANTINED
);
2928 if (XVA_ISSET_REQ(xvap
, XAT_REPARSE
)) {
2929 mutex_exit(&zp
->z_lock
);
2930 err
= SET_ERROR(EPERM
);
2934 if (need_policy
== FALSE
&&
2935 (XVA_ISSET_REQ(xvap
, XAT_AV_SCANSTAMP
) ||
2936 XVA_ISSET_REQ(xvap
, XAT_OPAQUE
))) {
2941 mutex_exit(&zp
->z_lock
);
2943 if (mask
& ATTR_MODE
) {
2944 if (zfs_zaccess(zp
, ACE_WRITE_ACL
, 0, skipaclchk
, cr
) == 0) {
2945 err
= secpolicy_setid_setsticky_clear(ip
, vap
,
2950 trim_mask
|= ATTR_MODE
;
2958 * If trim_mask is set then take ownership
2959 * has been granted or write_acl is present and user
2960 * has the ability to modify mode. In that case remove
2961 * UID|GID and or MODE from mask so that
2962 * secpolicy_vnode_setattr() doesn't revoke it.
2966 saved_mask
= vap
->va_mask
;
2967 vap
->va_mask
&= ~trim_mask
;
2969 err
= secpolicy_vnode_setattr(cr
, ip
, vap
, &oldva
, flags
,
2970 (int (*)(void *, int, cred_t
*))zfs_zaccess_unix
, zp
);
2975 vap
->va_mask
|= saved_mask
;
2979 * secpolicy_vnode_setattr, or take ownership may have
2982 mask
= vap
->va_mask
;
2984 if ((mask
& (ATTR_UID
| ATTR_GID
))) {
2985 err
= sa_lookup(zp
->z_sa_hdl
, SA_ZPL_XATTR(zfsvfs
),
2986 &xattr_obj
, sizeof (xattr_obj
));
2988 if (err
== 0 && xattr_obj
) {
2989 err
= zfs_zget(ZTOZSB(zp
), xattr_obj
, &attrzp
);
2993 if (mask
& ATTR_UID
) {
2994 new_kuid
= zfs_fuid_create(zfsvfs
,
2995 (uint64_t)vap
->va_uid
, cr
, ZFS_OWNER
, &fuidp
);
2996 if (new_kuid
!= KUID_TO_SUID(ZTOI(zp
)->i_uid
) &&
2997 zfs_fuid_overquota(zfsvfs
, B_FALSE
, new_kuid
)) {
3000 err
= SET_ERROR(EDQUOT
);
3005 if (mask
& ATTR_GID
) {
3006 new_kgid
= zfs_fuid_create(zfsvfs
,
3007 (uint64_t)vap
->va_gid
, cr
, ZFS_GROUP
, &fuidp
);
3008 if (new_kgid
!= KGID_TO_SGID(ZTOI(zp
)->i_gid
) &&
3009 zfs_fuid_overquota(zfsvfs
, B_TRUE
, new_kgid
)) {
3012 err
= SET_ERROR(EDQUOT
);
3017 tx
= dmu_tx_create(zfsvfs
->z_os
);
3019 if (mask
& ATTR_MODE
) {
3020 uint64_t pmode
= zp
->z_mode
;
3022 new_mode
= (pmode
& S_IFMT
) | (vap
->va_mode
& ~S_IFMT
);
3024 zfs_acl_chmod_setattr(zp
, &aclp
, new_mode
);
3026 mutex_enter(&zp
->z_lock
);
3027 if (!zp
->z_is_sa
&& ((acl_obj
= zfs_external_acl(zp
)) != 0)) {
3029 * Are we upgrading ACL from old V0 format
3032 if (zfsvfs
->z_version
>= ZPL_VERSION_FUID
&&
3033 zfs_znode_acl_version(zp
) ==
3034 ZFS_ACL_VERSION_INITIAL
) {
3035 dmu_tx_hold_free(tx
, acl_obj
, 0,
3037 dmu_tx_hold_write(tx
, DMU_NEW_OBJECT
,
3038 0, aclp
->z_acl_bytes
);
3040 dmu_tx_hold_write(tx
, acl_obj
, 0,
3043 } else if (!zp
->z_is_sa
&& aclp
->z_acl_bytes
> ZFS_ACE_SPACE
) {
3044 dmu_tx_hold_write(tx
, DMU_NEW_OBJECT
,
3045 0, aclp
->z_acl_bytes
);
3047 mutex_exit(&zp
->z_lock
);
3048 dmu_tx_hold_sa(tx
, zp
->z_sa_hdl
, B_TRUE
);
3050 if ((mask
& ATTR_XVATTR
) &&
3051 XVA_ISSET_REQ(xvap
, XAT_AV_SCANSTAMP
))
3052 dmu_tx_hold_sa(tx
, zp
->z_sa_hdl
, B_TRUE
);
3054 dmu_tx_hold_sa(tx
, zp
->z_sa_hdl
, B_FALSE
);
3058 dmu_tx_hold_sa(tx
, attrzp
->z_sa_hdl
, B_FALSE
);
3061 fuid_dirtied
= zfsvfs
->z_fuid_dirty
;
3063 zfs_fuid_txhold(zfsvfs
, tx
);
3065 zfs_sa_upgrade_txholds(tx
, zp
);
3067 err
= dmu_tx_assign(tx
, TXG_WAIT
);
3073 * Set each attribute requested.
3074 * We group settings according to the locks they need to acquire.
3076 * Note: you cannot set ctime directly, although it will be
3077 * updated as a side-effect of calling this function.
3081 if (mask
& (ATTR_UID
|ATTR_GID
|ATTR_MODE
))
3082 mutex_enter(&zp
->z_acl_lock
);
3083 mutex_enter(&zp
->z_lock
);
3085 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_FLAGS(zfsvfs
), NULL
,
3086 &zp
->z_pflags
, sizeof (zp
->z_pflags
));
3089 if (mask
& (ATTR_UID
|ATTR_GID
|ATTR_MODE
))
3090 mutex_enter(&attrzp
->z_acl_lock
);
3091 mutex_enter(&attrzp
->z_lock
);
3092 SA_ADD_BULK_ATTR(xattr_bulk
, xattr_count
,
3093 SA_ZPL_FLAGS(zfsvfs
), NULL
, &attrzp
->z_pflags
,
3094 sizeof (attrzp
->z_pflags
));
3097 if (mask
& (ATTR_UID
|ATTR_GID
)) {
3099 if (mask
& ATTR_UID
) {
3100 ZTOI(zp
)->i_uid
= SUID_TO_KUID(new_kuid
);
3101 new_uid
= zfs_uid_read(ZTOI(zp
));
3102 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_UID(zfsvfs
), NULL
,
3103 &new_uid
, sizeof (new_uid
));
3105 SA_ADD_BULK_ATTR(xattr_bulk
, xattr_count
,
3106 SA_ZPL_UID(zfsvfs
), NULL
, &new_uid
,
3108 ZTOI(attrzp
)->i_uid
= SUID_TO_KUID(new_uid
);
3112 if (mask
& ATTR_GID
) {
3113 ZTOI(zp
)->i_gid
= SGID_TO_KGID(new_kgid
);
3114 new_gid
= zfs_gid_read(ZTOI(zp
));
3115 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_GID(zfsvfs
),
3116 NULL
, &new_gid
, sizeof (new_gid
));
3118 SA_ADD_BULK_ATTR(xattr_bulk
, xattr_count
,
3119 SA_ZPL_GID(zfsvfs
), NULL
, &new_gid
,
3121 ZTOI(attrzp
)->i_gid
= SGID_TO_KGID(new_kgid
);
3124 if (!(mask
& ATTR_MODE
)) {
3125 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MODE(zfsvfs
),
3126 NULL
, &new_mode
, sizeof (new_mode
));
3127 new_mode
= zp
->z_mode
;
3129 err
= zfs_acl_chown_setattr(zp
);
3132 err
= zfs_acl_chown_setattr(attrzp
);
3137 if (mask
& ATTR_MODE
) {
3138 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MODE(zfsvfs
), NULL
,
3139 &new_mode
, sizeof (new_mode
));
3140 zp
->z_mode
= ZTOI(zp
)->i_mode
= new_mode
;
3141 ASSERT3P(aclp
, !=, NULL
);
3142 err
= zfs_aclset_common(zp
, aclp
, cr
, tx
);
3144 if (zp
->z_acl_cached
)
3145 zfs_acl_free(zp
->z_acl_cached
);
3146 zp
->z_acl_cached
= aclp
;
3150 if ((mask
& ATTR_ATIME
) || zp
->z_atime_dirty
) {
3151 zp
->z_atime_dirty
= 0;
3152 ZFS_TIME_ENCODE(&ip
->i_atime
, atime
);
3153 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_ATIME(zfsvfs
), NULL
,
3154 &atime
, sizeof (atime
));
3157 if (mask
& ATTR_MTIME
) {
3158 ZFS_TIME_ENCODE(&vap
->va_mtime
, mtime
);
3159 ZTOI(zp
)->i_mtime
= timespec_trunc(vap
->va_mtime
,
3160 ZTOI(zp
)->i_sb
->s_time_gran
);
3162 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MTIME(zfsvfs
), NULL
,
3163 mtime
, sizeof (mtime
));
3166 if (mask
& ATTR_CTIME
) {
3167 ZFS_TIME_ENCODE(&vap
->va_ctime
, ctime
);
3168 ZTOI(zp
)->i_ctime
= timespec_trunc(vap
->va_ctime
,
3169 ZTOI(zp
)->i_sb
->s_time_gran
);
3170 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CTIME(zfsvfs
), NULL
,
3171 ctime
, sizeof (ctime
));
3174 if (attrzp
&& mask
) {
3175 SA_ADD_BULK_ATTR(xattr_bulk
, xattr_count
,
3176 SA_ZPL_CTIME(zfsvfs
), NULL
, &ctime
,
3181 * Do this after setting timestamps to prevent timestamp
3182 * update from toggling bit
3185 if (xoap
&& (mask
& ATTR_XVATTR
)) {
3188 * restore trimmed off masks
3189 * so that return masks can be set for caller.
3192 if (XVA_ISSET_REQ(tmpxvattr
, XAT_APPENDONLY
)) {
3193 XVA_SET_REQ(xvap
, XAT_APPENDONLY
);
3195 if (XVA_ISSET_REQ(tmpxvattr
, XAT_NOUNLINK
)) {
3196 XVA_SET_REQ(xvap
, XAT_NOUNLINK
);
3198 if (XVA_ISSET_REQ(tmpxvattr
, XAT_IMMUTABLE
)) {
3199 XVA_SET_REQ(xvap
, XAT_IMMUTABLE
);
3201 if (XVA_ISSET_REQ(tmpxvattr
, XAT_NODUMP
)) {
3202 XVA_SET_REQ(xvap
, XAT_NODUMP
);
3204 if (XVA_ISSET_REQ(tmpxvattr
, XAT_AV_MODIFIED
)) {
3205 XVA_SET_REQ(xvap
, XAT_AV_MODIFIED
);
3207 if (XVA_ISSET_REQ(tmpxvattr
, XAT_AV_QUARANTINED
)) {
3208 XVA_SET_REQ(xvap
, XAT_AV_QUARANTINED
);
3211 if (XVA_ISSET_REQ(xvap
, XAT_AV_SCANSTAMP
))
3212 ASSERT(S_ISREG(ip
->i_mode
));
3214 zfs_xvattr_set(zp
, xvap
, tx
);
3218 zfs_fuid_sync(zfsvfs
, tx
);
3221 zfs_log_setattr(zilog
, tx
, TX_SETATTR
, zp
, vap
, mask
, fuidp
);
3223 mutex_exit(&zp
->z_lock
);
3224 if (mask
& (ATTR_UID
|ATTR_GID
|ATTR_MODE
))
3225 mutex_exit(&zp
->z_acl_lock
);
3228 if (mask
& (ATTR_UID
|ATTR_GID
|ATTR_MODE
))
3229 mutex_exit(&attrzp
->z_acl_lock
);
3230 mutex_exit(&attrzp
->z_lock
);
3233 if (err
== 0 && attrzp
) {
3234 err2
= sa_bulk_update(attrzp
->z_sa_hdl
, xattr_bulk
,
3243 zfs_fuid_info_free(fuidp
);
3251 if (err
== ERESTART
)
3254 err2
= sa_bulk_update(zp
->z_sa_hdl
, bulk
, count
, tx
);
3258 zfs_inode_update(zp
);
3262 if (zfsvfs
->z_os
->os_sync
== ZFS_SYNC_ALWAYS
)
3263 zil_commit(zilog
, 0);
3266 kmem_free(xattr_bulk
, sizeof (sa_bulk_attr_t
) * 7);
3267 kmem_free(bulk
, sizeof (sa_bulk_attr_t
) * 7);
3268 kmem_free(tmpxvattr
, sizeof (xvattr_t
));
typedef struct zfs_zlock {
    krwlock_t   *zl_rwlock;     /* lock we acquired */
    znode_t     *zl_znode;      /* znode we held */
    struct zfs_zlock *zl_next;  /* next in list */
} zfs_zlock_t;

/*
 * Drop locks and release vnodes that were held by zfs_rename_lock().
 */
static void
zfs_rename_unlock(zfs_zlock_t **zlpp)
{
    zfs_zlock_t *zl;

    while ((zl = *zlpp) != NULL) {
        if (zl->zl_znode != NULL)
            zfs_iput_async(ZTOI(zl->zl_znode));
        rw_exit(zl->zl_rwlock);
        *zlpp = zl->zl_next;
        kmem_free(zl, sizeof (*zl));
    }
}
/*
 * Search back through the directory tree, using the ".." entries.
 * Lock each directory in the chain to prevent concurrent renames.
 * Fail any attempt to move a directory into one of its own descendants.
 * XXX - z_parent_lock can overlap with map or grow locks
 */
static int
zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
{
    zfs_zlock_t *zl;
    znode_t     *zp = tdzp;
    uint64_t    rootid = ZTOZSB(zp)->z_root;
    uint64_t    oidp = zp->z_id;
    krwlock_t   *rwlp = &szp->z_parent_lock;
    krw_t       rw = RW_WRITER;

    /*
     * First pass write-locks szp and compares to zp->z_id.
     * Later passes read-lock zp and compare to zp->z_parent.
     */
    do {
        if (!rw_tryenter(rwlp, rw)) {
            /*
             * Another thread is renaming in this path.
             * Note that if we are a WRITER, we don't have any
             * parent_locks held yet.
             */
            if (rw == RW_READER && zp->z_id > szp->z_id) {
                /*
                 * Drop our locks and restart
                 */
                zfs_rename_unlock(&zl);
                *zlpp = NULL;
                zp = tdzp;
                oidp = zp->z_id;
                rwlp = &szp->z_parent_lock;
                rw = RW_WRITER;
                continue;
            } else {
                /*
                 * Wait for other thread to drop its locks
                 */
                rw_enter(rwlp, rw);
            }
        }

        zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
        zl->zl_rwlock = rwlp;
        zl->zl_znode = NULL;
        zl->zl_next = *zlpp;
        *zlpp = zl;

        if (oidp == szp->z_id)      /* We're a descendant of szp */
            return (SET_ERROR(EINVAL));

        if (oidp == rootid)         /* We've hit the top */
            return (0);

        if (rw == RW_READER) {      /* i.e. not the first pass */
            int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
            if (error)
                return (error);
            zl->zl_znode = zp;
        }
        (void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
            &oidp, sizeof (oidp));
        rwlp = &zp->z_parent_lock;
        rw = RW_READER;

    } while (zp->z_id != sdzp->z_id);

    return (0);
}
/*
 * Move an entry from the provided source directory to the target
 * directory.  Change the entry name as indicated.
 *
 *    IN:  sdip  - Source directory containing the "old entry".
 *         snm   - Old entry name.
 *         tdip  - Target directory to contain the "new entry".
 *         tnm   - New entry name.
 *         cr    - credentials of caller.
 *         flags - case flags
 *
 *    RETURN: 0 on success, error code on failure.
 *
 * Timestamps:
 *    sdip,tdip - ctime|mtime updated
 */
/*ARGSUSED*/
int
zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
    cred_t *cr, int flags)
{
    znode_t     *tdzp, *szp, *tzp;
    znode_t     *sdzp = ITOZ(sdip);
    zfsvfs_t    *zfsvfs = ITOZSB(sdip);
    zilog_t     *zilog;
    zfs_dirlock_t *sdl, *tdl;
    dmu_tx_t    *tx;
    zfs_zlock_t *zl;
    int         cmp, serr, terr;
    int         error = 0;
    int         zflg = 0;
    boolean_t   waited = B_FALSE;

    if (snm == NULL || tnm == NULL)
        return (SET_ERROR(EINVAL));

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(sdzp);
    zilog = zfsvfs->z_log;

    tdzp = ITOZ(tdip);
    ZFS_VERIFY_ZP(tdzp);

    /*
     * We check i_sb because snapshots and the ctldir must have different
     * super blocks.
     */
    if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EXDEV));
    }

    if (zfsvfs->z_utf8 && u8_validate(tnm,
        strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EILSEQ));
    }

    if (flags & FIGNORECASE)
        zflg |= ZCILOOK;

top:
    szp = NULL;
    tzp = NULL;
    zl = NULL;

    /*
     * This is to prevent the creation of links into attribute space
     * by renaming a linked file into/out of an attribute directory.
     * See the comment in zfs_link() for why this is considered bad.
     */
    if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EINVAL));
    }

    /*
     * Lock source and target directory entries.  To prevent deadlock,
     * a lock ordering must be defined.  We lock the directory with
     * the smallest object id first, or if it's a tie, the one with
     * the lexically first name.
     */
    if (sdzp->z_id < tdzp->z_id) {
        cmp = -1;
    } else if (sdzp->z_id > tdzp->z_id) {
        cmp = 1;
    } else {
        /*
         * First compare the two name arguments without
         * considering any case folding.
         */
        int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);

        cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
        ASSERT(error == 0 || !zfsvfs->z_utf8);
        if (cmp == 0) {
            /*
             * POSIX: "If the old argument and the new argument
             * both refer to links to the same existing file,
             * the rename() function shall return successfully
             * and perform no other action."
             */
            ZFS_EXIT(zfsvfs);
            return (0);
        }
        /*
         * If the file system is case-folding, then we may
         * have some more checking to do.  A case-folding file
         * system is either supporting mixed case sensitivity
         * access or is completely case-insensitive.  Note
         * that the file system is always case preserving.
         *
         * In mixed sensitivity mode case sensitive behavior
         * is the default.  FIGNORECASE must be used to
         * explicitly request case insensitive behavior.
         *
         * If the source and target names provided differ only
         * by case (e.g., a request to rename 'tim' to 'Tim'),
         * we will treat this as a special case in the
         * case-insensitive mode: as long as the source name
         * is an exact match, we will allow this to proceed as
         * a name-change request.
         */
        if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
            (zfsvfs->z_case == ZFS_CASE_MIXED &&
            flags & FIGNORECASE)) &&
            u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
            &error) == 0) {
            /*
             * case preserving rename request, require exact
             * name matches
             */
            zflg |= ZCIEXACT;
            zflg &= ~ZCILOOK;
        }
    }

    /*
     * If the source and destination directories are the same, we should
     * grab the z_name_lock of that directory only once.
     */
    if (sdzp == tdzp) {
        zflg |= ZHAVELOCK;
        rw_enter(&sdzp->z_name_lock, RW_READER);
    }

    if (cmp < 0) {
        serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
            ZEXISTS | zflg, NULL, NULL);
        terr = zfs_dirent_lock(&tdl,
            tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
    } else {
        terr = zfs_dirent_lock(&tdl,
            tdzp, tnm, &tzp, zflg, NULL, NULL);
        serr = zfs_dirent_lock(&sdl,
            sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
            NULL, NULL);
    }

    if (serr) {
        /*
         * Source entry invalid or not there.
         */
        if (!terr) {
            zfs_dirent_unlock(tdl);
            if (tzp)
                iput(ZTOI(tzp));
        }

        if (sdzp == tdzp)
            rw_exit(&sdzp->z_name_lock);

        if (strcmp(snm, "..") == 0)
            serr = EINVAL;
        ZFS_EXIT(zfsvfs);
        return (serr);
    }
    if (terr) {
        zfs_dirent_unlock(sdl);
        iput(ZTOI(szp));

        if (sdzp == tdzp)
            rw_exit(&sdzp->z_name_lock);

        if (strcmp(tnm, "..") == 0)
            terr = EINVAL;
        ZFS_EXIT(zfsvfs);
        return (terr);
    }

    /*
     * Must have write access at the source to remove the old entry
     * and write access at the target to create the new entry.
     * Note that if target and source are the same, this can be
     * done in a single check.
     */
    if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
        goto out;

    if (S_ISDIR(ZTOI(szp)->i_mode)) {
        /*
         * Check to make sure rename is valid.
         * Can't do a move like this: /usr/a/b to /usr/a/b/c/d
         */
        if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
            goto out;
    }

    /*
     * Does target exist?
     */
    if (tzp) {
        /*
         * Source and target must be the same type.
         */
        if (S_ISDIR(ZTOI(szp)->i_mode)) {
            if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
                error = SET_ERROR(ENOTDIR);
                goto out;
            }
        } else {
            if (S_ISDIR(ZTOI(tzp)->i_mode)) {
                error = SET_ERROR(EISDIR);
                goto out;
            }
        }
        /*
         * POSIX dictates that when the source and target
         * entries refer to the same file object, rename
         * must do nothing and exit without error.
         */
        if (szp->z_id == tzp->z_id) {
            error = 0;
            goto out;
        }
    }

    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
    dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
    dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
    dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
    if (sdzp != tdzp) {
        dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
        zfs_sa_upgrade_txholds(tx, tdzp);
    }
    if (tzp) {
        dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
        zfs_sa_upgrade_txholds(tx, tzp);
    }

    zfs_sa_upgrade_txholds(tx, szp);
    dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
    error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
    if (error) {
        if (zl != NULL)
            zfs_rename_unlock(&zl);
        zfs_dirent_unlock(sdl);
        zfs_dirent_unlock(tdl);

        if (sdzp == tdzp)
            rw_exit(&sdzp->z_name_lock);

        if (error == ERESTART) {
            waited = B_TRUE;
            dmu_tx_wait(tx);
            dmu_tx_abort(tx);
            iput(ZTOI(szp));
            if (tzp)
                iput(ZTOI(tzp));
            goto top;
        }
        dmu_tx_abort(tx);
        iput(ZTOI(szp));
        if (tzp)
            iput(ZTOI(tzp));
        ZFS_EXIT(zfsvfs);
        return (error);
    }

    if (tzp)    /* Attempt to remove the existing target */
        error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);

    if (error == 0) {
        error = zfs_link_create(tdl, szp, tx, ZRENAMING);
        if (error == 0) {
            szp->z_pflags |= ZFS_AV_MODIFIED;

            error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
                (void *)&szp->z_pflags, sizeof (uint64_t), tx);
            ASSERT0(error);

            error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
            if (error == 0) {
                zfs_log_rename(zilog, tx, TX_RENAME |
                    (flags & FIGNORECASE ? TX_CI : 0), sdzp,
                    sdl->dl_name, tdzp, tdl->dl_name, szp);
            } else {
                /*
                 * At this point, we have successfully created
                 * the target name, but have failed to remove
                 * the source name.  Since the create was done
                 * with the ZRENAMING flag, there are
                 * complications; for one, the link count is
                 * wrong.  The easiest way to deal with this
                 * is to remove the newly created target, and
                 * return the original error.  This must
                 * succeed; fortunately, it is very unlikely to
                 * fail, since we just created it.
                 */
                VERIFY3U(zfs_link_destroy(tdl, szp, tx,
                    ZRENAMING, NULL), ==, 0);
            }
        }
    }

    dmu_tx_commit(tx);
out:
    if (zl != NULL)
        zfs_rename_unlock(&zl);

    zfs_dirent_unlock(sdl);
    zfs_dirent_unlock(tdl);

    zfs_inode_update(sdzp);
    if (sdzp == tdzp)
        rw_exit(&sdzp->z_name_lock);

    if (sdzp != tdzp)
        zfs_inode_update(tdzp);

    zfs_inode_update(szp);
    iput(ZTOI(szp));
    if (tzp) {
        zfs_inode_update(tzp);
        iput(ZTOI(tzp));
    }

    if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zilog, 0);

    ZFS_EXIT(zfsvfs);
    return (error);
}
/*
 * Insert the indicated symbolic reference entry into the directory.
 *
 *    IN:  dip    - Directory to contain new symbolic link.
 *         link   - Name for new symlink entry.
 *         vap    - Attributes of new entry.
 *         target - Target path of new symlink.
 *
 *         cr     - credentials of caller.
 *         flags  - case flags
 *
 *    RETURN: 0 on success, error code on failure.
 *
 * Timestamps:
 *    dip - ctime|mtime updated
 */
/*ARGSUSED*/
int
zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
    struct inode **ipp, cred_t *cr, int flags)
{
    znode_t     *zp, *dzp = ITOZ(dip);
    zfs_dirlock_t *dl;
    dmu_tx_t    *tx;
    zfsvfs_t    *zfsvfs = ITOZSB(dip);
    zilog_t     *zilog;
    uint64_t    len = strlen(link);
    int         error;
    int         zflg = ZNEW;
    zfs_acl_ids_t acl_ids;
    boolean_t   fuid_dirtied;
    uint64_t    txtype = TX_SYMLINK;
    boolean_t   waited = B_FALSE;

    ASSERT(S_ISLNK(vap->va_mode));

    if (name == NULL)
        return (SET_ERROR(EINVAL));

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(dzp);
    zilog = zfsvfs->z_log;

    if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
        NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EILSEQ));
    }
    if (flags & FIGNORECASE)
        zflg |= ZCILOOK;

    if (len > MAXPATHLEN) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(ENAMETOOLONG));
    }

    if ((error = zfs_acl_ids_create(dzp, 0,
        vap, cr, NULL, &acl_ids)) != 0) {
        ZFS_EXIT(zfsvfs);
        return (error);
    }
top:
    *ipp = NULL;

    /*
     * Attempt to lock directory; fail if entry already exists.
     */
    error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
    if (error) {
        zfs_acl_ids_free(&acl_ids);
        ZFS_EXIT(zfsvfs);
        return (error);
    }

    if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
        zfs_acl_ids_free(&acl_ids);
        zfs_dirent_unlock(dl);
        ZFS_EXIT(zfsvfs);
        return (error);
    }

    if (zfs_acl_ids_overquota(zfsvfs, &acl_ids)) {
        zfs_acl_ids_free(&acl_ids);
        zfs_dirent_unlock(dl);
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EDQUOT));
    }
    tx = dmu_tx_create(zfsvfs->z_os);
    fuid_dirtied = zfsvfs->z_fuid_dirty;
    dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
    dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
    dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
        ZFS_SA_BASE_ATTR_SIZE + len);
    dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
    if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
        dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
            acl_ids.z_aclp->z_acl_bytes);
    }
    if (fuid_dirtied)
        zfs_fuid_txhold(zfsvfs, tx);
    error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
    if (error) {
        zfs_dirent_unlock(dl);
        if (error == ERESTART) {
            waited = B_TRUE;
            dmu_tx_wait(tx);
            dmu_tx_abort(tx);
            goto top;
        }
        zfs_acl_ids_free(&acl_ids);
        dmu_tx_abort(tx);
        ZFS_EXIT(zfsvfs);
        return (error);
    }

    /*
     * Create a new object for the symlink.
     * for version 4 ZPL datasets the symlink will be an SA attribute
     */
    zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);

    if (fuid_dirtied)
        zfs_fuid_sync(zfsvfs, tx);

    mutex_enter(&zp->z_lock);
    if (zp->z_is_sa)
        error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
            link, len, tx);
    else
        zfs_sa_symlink(zp, link, len, tx);
    mutex_exit(&zp->z_lock);

    zp->z_size = len;
    (void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
        &zp->z_size, sizeof (zp->z_size), tx);
    /*
     * Insert the new object into the directory.
     */
    (void) zfs_link_create(dl, zp, tx, ZNEW);

    if (flags & FIGNORECASE)
        txtype |= TX_CI;
    zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);

    zfs_inode_update(dzp);
    zfs_inode_update(zp);

    zfs_acl_ids_free(&acl_ids);

    dmu_tx_commit(tx);

    zfs_dirent_unlock(dl);

    *ipp = ZTOI(zp);

    if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zilog, 0);

    ZFS_EXIT(zfsvfs);
    return (error);
}
/*
 * Return, in the buffer contained in the provided uio structure,
 * the symbolic path referred to by ip.
 *
 *    IN:  ip  - inode of symbolic link
 *         uio - structure to contain the link path.
 *         cr  - credentials of caller.
 *
 *    RETURN: 0 if success
 *            error code if failure
 *
 * Timestamps:
 *    ip - atime updated
 */
/* ARGSUSED */
int
zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    int         error;

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);

    mutex_enter(&zp->z_lock);
    if (zp->z_is_sa)
        error = sa_lookup_uio(zp->z_sa_hdl,
            SA_ZPL_SYMLINK(zfsvfs), uio);
    else
        error = zfs_sa_readlink(zp, uio);
    mutex_exit(&zp->z_lock);

    ZFS_EXIT(zfsvfs);
    return (error);
}
/*
 * Insert a new entry into directory tdip referencing sip.
 *
 *    IN:  tdip - Directory to contain new entry.
 *         sip  - inode of new entry.
 *         name - name of new entry.
 *         cr   - credentials of caller.
 *
 *    RETURN: 0 if success
 *            error code if failure
 *
 * Timestamps:
 *    tdip - ctime|mtime updated
 *     sip - ctime updated
 */
/* ARGSUSED */
int
zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
    int flags)
{
    znode_t     *dzp = ITOZ(tdip);
    znode_t     *tzp, *szp;
    zfsvfs_t    *zfsvfs = ITOZSB(tdip);
    zilog_t     *zilog;
    zfs_dirlock_t *dl;
    dmu_tx_t    *tx;
    int         error;
    int         zf = ZNEW;
    uint64_t    parent;
    uid_t       owner;
    boolean_t   waited = B_FALSE;
    boolean_t   is_tmpfile = 0;
    uint64_t    txg;

    is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));

    ASSERT(S_ISDIR(tdip->i_mode));

    if (name == NULL)
        return (SET_ERROR(EINVAL));

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(dzp);
    zilog = zfsvfs->z_log;

    /*
     * POSIX dictates that we return EPERM here.
     * Better choices include ENOTSUP or EISDIR.
     */
    if (S_ISDIR(sip->i_mode)) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EPERM));
    }

    szp = ITOZ(sip);
    ZFS_VERIFY_ZP(szp);

    /*
     * We check i_sb because snapshots and the ctldir must have different
     * super blocks.
     */
    if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EXDEV));
    }

    /* Prevent links to .zfs/shares files */

    if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
        &parent, sizeof (uint64_t))) != 0) {
        ZFS_EXIT(zfsvfs);
        return (error);
    }
    if (parent == zfsvfs->z_shares_dir) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EPERM));
    }

    if (zfsvfs->z_utf8 && u8_validate(name,
        strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EILSEQ));
    }
    if (flags & FIGNORECASE)
        zf |= ZCILOOK;

    /*
     * We do not support links between attributes and non-attributes
     * because of the potential security risk of creating links
     * into "normal" file space in order to circumvent restrictions
     * imposed in attribute space.
     */
    if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EINVAL));
    }

    owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
        cr, ZFS_OWNER);
    if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EPERM));
    }

    if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
        ZFS_EXIT(zfsvfs);
        return (error);
    }

top:
    /*
     * Attempt to lock directory; fail if entry already exists.
     */
    error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
    if (error) {
        ZFS_EXIT(zfsvfs);
        return (error);
    }

    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
    dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
    if (is_tmpfile)
        dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);

    zfs_sa_upgrade_txholds(tx, szp);
    zfs_sa_upgrade_txholds(tx, dzp);
    error = dmu_tx_assign(tx, waited ? TXG_WAITED : TXG_NOWAIT);
    if (error) {
        zfs_dirent_unlock(dl);
        if (error == ERESTART) {
            waited = B_TRUE;
            dmu_tx_wait(tx);
            dmu_tx_abort(tx);
            goto top;
        }
        dmu_tx_abort(tx);
        ZFS_EXIT(zfsvfs);
        return (error);
    }
    /* unmark z_unlinked so zfs_link_create will not reject */
    if (is_tmpfile)
        szp->z_unlinked = 0;
    error = zfs_link_create(dl, szp, tx, 0);

    if (error == 0) {
        uint64_t txtype = TX_LINK;
        /*
         * tmpfile is created to be in z_unlinkedobj, so remove it.
         * Also, we don't log in ZIL, because all previous file
         * operations on the tmpfile are ignored by ZIL. Instead we
         * always wait for txg to sync to make sure all previous
         * operations are sync safe.
         */
        if (is_tmpfile) {
            VERIFY(zap_remove_int(zfsvfs->z_os,
                zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
        } else {
            if (flags & FIGNORECASE)
                txtype |= TX_CI;
            zfs_log_link(zilog, tx, txtype, dzp, szp, name);
        }
    } else if (is_tmpfile) {
        /* restore z_unlinked since linking failed */
        szp->z_unlinked = 1;
    }
    txg = dmu_tx_get_txg(tx);
    dmu_tx_commit(tx);

    zfs_dirent_unlock(dl);

    if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zilog, 0);

    if (is_tmpfile)
        txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);

    zfs_inode_update(dzp);
    zfs_inode_update(szp);
    ZFS_EXIT(zfsvfs);
    return (error);
}
static void
zfs_putpage_commit_cb(void *arg)
{
    struct page *pp = arg;

    end_page_writeback(pp);
}
/*
 * Push a page out to disk; once the page is on stable storage the
 * registered commit callback will be run as notification of completion.
 *
 *    IN:  ip  - page mapped for inode.
 *         pp  - page to push (page is locked)
 *         wbc - writeback control data
 *
 *    RETURN: 0 if success
 *            error code if failure
 *
 * Timestamps:
 *    ip - ctime|mtime updated
 */
/* ARGSUSED */
int
zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    loff_t      offset;
    loff_t      pgoff;
    unsigned int pglen;
    rl_t        *rl;
    dmu_tx_t    *tx;
    caddr_t     va;
    int         err = 0;
    uint64_t    mtime[2], ctime[2];
    sa_bulk_attr_t bulk[3];
    int         cnt = 0;
    struct address_space *mapping;

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);

    ASSERT(PageLocked(pp));

    pgoff = page_offset(pp);        /* Page byte-offset in file */
    offset = i_size_read(ip);       /* File length in bytes */
    pglen = MIN(PAGE_SIZE,          /* Page length in bytes */
        P2ROUNDUP(offset, PAGE_SIZE)-pgoff);

    /* Page is beyond end of file */
    if (pgoff >= offset) {
        unlock_page(pp);
        ZFS_EXIT(zfsvfs);
        return (0);
    }

    /* Truncate page length to end of file */
    if (pgoff + pglen > offset)
        pglen = offset - pgoff;

    /*
     * FIXME: Allow mmap writes past its quota.  The correct fix
     * is to register a page_mkwrite() handler to count the page
     * against its quota when it is about to be dirtied.
     */
    if (zfs_owner_overquota(zfsvfs, zp, B_FALSE) ||
        zfs_owner_overquota(zfsvfs, zp, B_TRUE)) {
        err = EDQUOT;
    }

    /*
     * The ordering here is critical and must adhere to the following
     * rules in order to avoid deadlocking in either zfs_read() or
     * zfs_free_range() due to a lock inversion.
     *
     * 1) The page must be unlocked prior to acquiring the range lock.
     *    This is critical because zfs_read() calls find_lock_page()
     *    which may block on the page lock while holding the range lock.
     *
     * 2) Before setting or clearing write back on a page the range lock
     *    must be held in order to prevent a lock inversion with the
     *    zfs_free_range() function.
     *
     * This presents a problem because upon entering this function the
     * page lock is already held.  To safely acquire the range lock the
     * page lock must be dropped.  This creates a window where another
     * process could truncate, invalidate, dirty, or write out the page.
     *
     * Therefore, after successfully reacquiring the range and page locks
     * the current page state is checked.  In the common case everything
     * will be as is expected and it can be written out.  However, if
     * the page state has changed it must be handled accordingly.
     */
    mapping = pp->mapping;
    redirty_page_for_writepage(wbc, pp);
    unlock_page(pp);

    rl = zfs_range_lock(&zp->z_range_lock, pgoff, pglen, RL_WRITER);
    lock_page(pp);

    /* Page mapping changed or it was no longer dirty, we're done */
    if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
        unlock_page(pp);
        zfs_range_unlock(rl);
        ZFS_EXIT(zfsvfs);
        return (0);
    }

    /* Another process started write block if required */
    if (PageWriteback(pp)) {
        unlock_page(pp);
        zfs_range_unlock(rl);

        if (wbc->sync_mode != WB_SYNC_NONE)
            wait_on_page_writeback(pp);

        ZFS_EXIT(zfsvfs);
        return (0);
    }

    /* Clear the dirty flag the required locks are held */
    if (!clear_page_dirty_for_io(pp)) {
        unlock_page(pp);
        zfs_range_unlock(rl);
        ZFS_EXIT(zfsvfs);
        return (0);
    }

    /*
     * Counterpart for redirty_page_for_writepage() above.  This page
     * was in fact not skipped and should not be counted as if it were.
     */
    wbc->pages_skipped--;
    set_page_writeback(pp);
    unlock_page(pp);

    tx = dmu_tx_create(zfsvfs->z_os);
    dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
    zfs_sa_upgrade_txholds(tx, zp);

    err = dmu_tx_assign(tx, TXG_NOWAIT);
    if (err != 0) {
        if (err == ERESTART)
            dmu_tx_wait(tx);

        dmu_tx_abort(tx);
        __set_page_dirty_nobuffers(pp);
        end_page_writeback(pp);
        zfs_range_unlock(rl);
        ZFS_EXIT(zfsvfs);
        return (err);
    }

    va = kmap(pp);
    ASSERT3U(pglen, <=, PAGE_SIZE);
    dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
    kunmap(pp);

    SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
    SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
    SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
        &zp->z_pflags, 8);

    /* Preserve the mtime and ctime provided by the inode */
    ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
    ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
    zp->z_atime_dirty = 0;

    err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);

    zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
        zfs_putpage_commit_cb, pp);
    dmu_tx_commit(tx);

    zfs_range_unlock(rl);

    if (wbc->sync_mode != WB_SYNC_NONE) {
        /*
         * Note that this is rarely called under writepages(), because
         * writepages() normally handles the entire commit for
         * performance reasons.
         */
        zil_commit(zfsvfs->z_log, zp->z_id);
    }

    ZFS_EXIT(zfsvfs);
    return (err);
}
/*
 * Update the system attributes when the inode has been dirtied.  For the
 * moment we only update the mode, atime, mtime, and ctime.
 */
int
zfs_dirty_inode(struct inode *ip, int flags)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    dmu_tx_t    *tx;
    uint64_t    mode, atime[2], mtime[2], ctime[2];
    sa_bulk_attr_t bulk[4];
    int         error = 0;
    int         cnt = 0;

    if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
        return (0);

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);

    /*
     * This is the lazytime semantic introduced in Linux 4.0
     * This flag will only be called from update_time when lazytime is set.
     * (Note, I_DIRTY_SYNC will also be set if not lazytime)
     * Fortunately mtime and ctime are managed within ZFS itself, so we
     * only need to dirty atime.
     */
    if (flags == I_DIRTY_TIME) {
        zp->z_atime_dirty = 1;
        goto out;
    }

    tx = dmu_tx_create(zfsvfs->z_os);

    dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
    zfs_sa_upgrade_txholds(tx, zp);

    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        goto out;
    }

    mutex_enter(&zp->z_lock);
    zp->z_atime_dirty = 0;

    SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
    SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
    SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
    SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);

    /* Preserve the mode, mtime and ctime provided by the inode */
    ZFS_TIME_ENCODE(&ip->i_atime, atime);
    ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
    ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
    mode = ip->i_mode;

    error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
    mutex_exit(&zp->z_lock);

    dmu_tx_commit(tx);
out:
    ZFS_EXIT(zfsvfs);
    return (error);
}
/*ARGSUSED*/
void
zfs_inactive(struct inode *ip)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    uint64_t    atime[2];
    int         error;
    int         need_unlock = 0;

    /* Only read lock if we haven't already write locked, e.g. rollback */
    if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
        need_unlock = 1;
        rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
    }

    if (zp->z_sa_hdl == NULL) {
        if (need_unlock)
            rw_exit(&zfsvfs->z_teardown_inactive_lock);
        return;
    }

    if (zp->z_atime_dirty && zp->z_unlinked == 0) {
        dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);

        dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
        zfs_sa_upgrade_txholds(tx, zp);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
            dmu_tx_abort(tx);
        } else {
            ZFS_TIME_ENCODE(&ip->i_atime, atime);
            mutex_enter(&zp->z_lock);
            (void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
                (void *)&atime, sizeof (atime), tx);
            zp->z_atime_dirty = 0;
            mutex_exit(&zp->z_lock);
            dmu_tx_commit(tx);
        }
    }

    zfs_zinactive(zp);
    if (need_unlock)
        rw_exit(&zfsvfs->z_teardown_inactive_lock);
}
/*
 * Bounds-check the seek operation.
 *
 *    IN:  ip    - inode seeking within
 *         ooff  - old file offset
 *         noffp - pointer to new file offset
 *         ct    - caller context
 *
 *    RETURN: 0 if success
 *            EINVAL if new offset invalid
 */
/* ARGSUSED */
int
zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
{
    if (S_ISDIR(ip->i_mode))
        return (0);
    return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
/*
 * Fill pages with data from the disk.
 */
static int
zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    objset_t    *os = zfsvfs->z_os;
    struct page *cur_pp;
    u_offset_t  io_off, total;
    size_t      io_len;
    loff_t      i_size;
    unsigned    page_idx = 0;
    int         err;

    io_len = nr_pages << PAGE_SHIFT;
    i_size = i_size_read(ip);
    io_off = page_offset(pl[0]);

    if (io_off + io_len > i_size)
        io_len = i_size - io_off;

    /*
     * Iterate over list of pages and read each page individually.
     */
    for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
        caddr_t va;

        cur_pp = pl[page_idx++];
        va = kmap(cur_pp);
        err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
            DMU_READ_PREFETCH);
        kunmap(cur_pp);
        if (err) {
            /* convert checksum errors into IO errors */
            if (err == ECKSUM)
                err = SET_ERROR(EIO);
            return (err);
        }
    }

    return (0);
}
/*
 * Uses zfs_fillpage to read data from the file and fill the pages.
 *
 *    IN:  ip       - inode of file to get data from.
 *         pl       - list of pages to read
 *         nr_pages - number of pages to read
 *
 *    RETURN: 0 on success, error code on failure.
 *
 * Timestamps:
 *    vp - atime updated
 */
/* ARGSUSED */
int
zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    int         err;

    if (pl == NULL)
        return (0);

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);

    err = zfs_fillpage(ip, pl, nr_pages);

    ZFS_EXIT(zfsvfs);
    return (err);
}
/*
 * Check ZFS specific permissions to memory map a section of a file.
 *
 *    IN:  ip       - inode of the file to mmap
 *         off      - file offset
 *         addrp    - start address in memory region
 *         len      - length of memory region
 *         vm_flags - address flags
 *
 *    RETURN: 0 if success
 *            error code if failure
 */
/*ARGSUSED*/
int
zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
    unsigned long vm_flags)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);

    if ((vm_flags & VM_WRITE) && (zp->z_pflags &
        (ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EPERM));
    }

    if ((vm_flags & (VM_READ | VM_EXEC)) &&
        (zp->z_pflags & ZFS_AV_QUARANTINED)) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EACCES));
    }

    if (off < 0 || len > MAXOFFSET_T - off) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(ENXIO));
    }

    ZFS_EXIT(zfsvfs);
    return (0);
}
/*
 * convoff - converts the given data (start, whence) to the
 * given whence.
 */
int
convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
{
    vattr_t vap;
    int error;

    if ((lckdat->l_whence == 2) || (whence == 2)) {
        if ((error = zfs_getattr(ip, &vap, 0, CRED()) != 0))
            return (error);
    }

    switch (lckdat->l_whence) {
    case 1:
        lckdat->l_start += offset;
        break;
    case 2:
        lckdat->l_start += vap.va_size;
        /* FALLTHROUGH */
    case 0:
        break;
    default:
        return (SET_ERROR(EINVAL));
    }

    if (lckdat->l_start < 0)
        return (SET_ERROR(EINVAL));

    switch (whence) {
    case 1:
        lckdat->l_start -= offset;
        break;
    case 2:
        lckdat->l_start -= vap.va_size;
        /* FALLTHROUGH */
    case 0:
        break;
    default:
        return (SET_ERROR(EINVAL));
    }

    lckdat->l_whence = (short)whence;
    return (0);
}
/*
 * Free or allocate space in a file.  Currently, this function only
 * supports the `F_FREESP' command.  However, this command is somewhat
 * misnamed, as its functionality includes the ability to allocate as
 * well as free space.
 *
 *    IN:  ip     - inode of file to free data in.
 *         cmd    - action to take (only F_FREESP supported).
 *         bfp    - section of file to free/alloc.
 *         flag   - current file open mode flags.
 *         offset - current file offset.
 *         cr     - credentials of caller [UNUSED].
 *
 *    RETURN: 0 on success, error code on failure.
 *
 * Timestamps:
 *    ip - ctime|mtime updated
 */
/* ARGSUSED */
int
zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
    offset_t offset, cred_t *cr)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    uint64_t    off, len;
    int         error;

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);

    if (cmd != F_FREESP) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EINVAL));
    }

    /*
     * Callers might not be able to detect properly that we are read-only,
     * so check it explicitly here.
     */
    if (zfs_is_readonly(zfsvfs)) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EROFS));
    }

    if ((error = convoff(ip, bfp, 0, offset))) {
        ZFS_EXIT(zfsvfs);
        return (error);
    }

    if (bfp->l_len < 0) {
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EINVAL));
    }

    /*
     * Permissions aren't checked on Solaris because on this OS
     * zfs_space() can only be called with an opened file handle.
     * On Linux we can get here through truncate_range() which
     * operates directly on inodes, so we need to check access rights.
     */
    if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
        ZFS_EXIT(zfsvfs);
        return (error);
    }

    off = bfp->l_start;
    len = bfp->l_len;   /* 0 means from off to end of file */

    error = zfs_freesp(zp, off, len, flag, TRUE);

    ZFS_EXIT(zfsvfs);
    return (error);
}
/*ARGSUSED*/
int
zfs_fid(struct inode *ip, fid_t *fidp)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    uint32_t    gen;
    uint64_t    gen64;
    uint64_t    object = zp->z_id;
    zfid_short_t *zfid;
    int         size, i, error;

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);

    if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
        &gen64, sizeof (uint64_t))) != 0) {
        ZFS_EXIT(zfsvfs);
        return (error);
    }

    gen = (uint32_t)gen64;

    size = SHORT_FID_LEN;

    zfid = (zfid_short_t *)fidp;

    zfid->zf_len = size;

    for (i = 0; i < sizeof (zfid->zf_object); i++)
        zfid->zf_object[i] = (uint8_t)(object >> (8 * i));

    /* Must have a non-zero generation number to distinguish from .zfs */
    if (gen == 0)
        gen = 1;
    for (i = 0; i < sizeof (zfid->zf_gen); i++)
        zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));

    ZFS_EXIT(zfsvfs);
    return (0);
}
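
/*
 * Illustrative sketch (not part of ZFS): the short fid built above packs the
 * object and generation numbers least-significant byte first.  A consumer of
 * the fid could recover them by reversing the loops.  The helper name below
 * is hypothetical and exists only to show the inverse of the packing above.
 */
static inline void
zfs_fid_decode_example(const zfid_short_t *zfid, uint64_t *objectp,
    uint32_t *genp)
{
    uint64_t object = 0;
    uint32_t gen = 0;
    uint_t i;

    /* Reassemble the object number, least-significant byte first */
    for (i = 0; i < sizeof (zfid->zf_object); i++)
        object |= (uint64_t)zfid->zf_object[i] << (8 * i);

    /* Reassemble the generation number the same way */
    for (i = 0; i < sizeof (zfid->zf_gen); i++)
        gen |= (uint32_t)zfid->zf_gen[i] << (8 * i);

    *objectp = object;
    *genp = gen;
}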
/*ARGSUSED*/
int
zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    int         error;
    boolean_t   skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);
    error = zfs_getacl(zp, vsecp, skipaclchk, cr);
    ZFS_EXIT(zfsvfs);

    return (error);
}

/*ARGSUSED*/
int
zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    int         error;
    boolean_t   skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
    zilog_t     *zilog = zfsvfs->z_log;

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);

    error = zfs_setacl(zp, vsecp, skipaclchk, cr);

    if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zilog, 0);

    ZFS_EXIT(zfsvfs);
    return (error);
}
#ifdef HAVE_UIO_ZEROCOPY
/*
 * Tunable, both must be a power of 2.
 *
 * zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
 * zcr_blksz_max: if set to less than the file block size, allow loaning out
 *                of an arcbuf for a partial block read
 */
int zcr_blksz_min = (1 << 10);  /* 1K */
int zcr_blksz_max = (1 << 17);  /* 128K */

/*ARGSUSED*/
static int
zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
{
    znode_t     *zp = ITOZ(ip);
    zfsvfs_t    *zfsvfs = ITOZSB(ip);
    int         max_blksz = zfsvfs->z_max_blksz;
    uio_t       *uio = &xuio->xu_uio;
    ssize_t     size = uio->uio_resid;
    offset_t    offset = uio->uio_loffset;
    int         blksz;
    int         fullblk, i;
    arc_buf_t   *abuf;
    ssize_t     maxsize;
    int         preamble, postamble;

    if (xuio->xu_type != UIOTYPE_ZEROCOPY)
        return (SET_ERROR(EINVAL));

    ZFS_ENTER(zfsvfs);
    ZFS_VERIFY_ZP(zp);
    switch (ioflag) {
    case UIO_WRITE:
        /*
         * Loan out an arc_buf for write if write size is bigger than
         * max_blksz, and the file's block size is also max_blksz.
         */
        blksz = max_blksz;
        if (size < blksz || zp->z_blksz != blksz) {
            ZFS_EXIT(zfsvfs);
            return (SET_ERROR(EINVAL));
        }
        /*
         * Caller requests buffers for write before knowing where the
         * write offset might be (e.g. NFS TCP write).
         */
        if (offset == -1) {
            preamble = 0;
        } else {
            preamble = P2PHASE(offset, blksz);
            if (preamble) {
                preamble = blksz - preamble;
                size -= preamble;
            }
        }

        postamble = P2PHASE(size, blksz);
        size -= postamble;

        fullblk = size / blksz;
        (void) dmu_xuio_init(xuio,
            (preamble != 0) + fullblk + (postamble != 0));

        /*
         * Have to fix iov base/len for partial buffers.  They
         * currently represent full arc_buf's.
         */
        if (preamble) {
            /* data begins in the middle of the arc_buf */
            abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
                blksz);
            (void) dmu_xuio_add(xuio, abuf,
                blksz - preamble, preamble);
        }

        for (i = 0; i < fullblk; i++) {
            abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
                blksz);
            (void) dmu_xuio_add(xuio, abuf, 0, blksz);
        }

        if (postamble) {
            /* data ends in the middle of the arc_buf */
            abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
                blksz);
            (void) dmu_xuio_add(xuio, abuf, 0, postamble);
        }
        break;
    case UIO_READ:
        /*
         * Loan out an arc_buf for read if the read size is larger than
         * the current file block size.  Block alignment is not
         * considered.  Partial arc_buf will be loaned out for read.
         */
        blksz = zp->z_blksz;
        if (blksz < zcr_blksz_min)
            blksz = zcr_blksz_min;
        if (blksz > zcr_blksz_max)
            blksz = zcr_blksz_max;
        /* avoid potential complexity of dealing with it */
        if (blksz > max_blksz) {
            ZFS_EXIT(zfsvfs);
            return (SET_ERROR(EINVAL));
        }

        maxsize = zp->z_size - uio->uio_loffset;
        if (size > maxsize)
            size = maxsize;

        if (size < blksz) {
            ZFS_EXIT(zfsvfs);
            return (SET_ERROR(EINVAL));
        }
        break;
    default:
        ZFS_EXIT(zfsvfs);
        return (SET_ERROR(EINVAL));
    }

    uio->uio_extflg = UIO_XUIO;
    XUIO_XUZC_RW(xuio) = ioflag;
    ZFS_EXIT(zfsvfs);
    return (0);
}

/*ARGSUSED*/
static int
zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
{
    int         i;
    arc_buf_t   *abuf;
    int         ioflag = XUIO_XUZC_RW(xuio);

    ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);

    i = dmu_xuio_cnt(xuio);
    while (i-- > 0) {
        abuf = dmu_xuio_arcbuf(xuio, i);
        /*
         * if abuf == NULL, it must be a write buffer
         * that has been returned in zfs_write().
         */
        if (abuf)
            dmu_return_arcbuf(abuf);
        ASSERT(abuf || ioflag == UIO_WRITE);
    }

    dmu_xuio_fini(xuio);
    return (0);
}
#endif /* HAVE_UIO_ZEROCOPY */
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_open);
EXPORT_SYMBOL(zfs_close);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_lookup);
EXPORT_SYMBOL(zfs_create);
EXPORT_SYMBOL(zfs_tmpfile);
EXPORT_SYMBOL(zfs_remove);
EXPORT_SYMBOL(zfs_mkdir);
EXPORT_SYMBOL(zfs_rmdir);
EXPORT_SYMBOL(zfs_readdir);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_getattr);
EXPORT_SYMBOL(zfs_getattr_fast);
EXPORT_SYMBOL(zfs_setattr);
EXPORT_SYMBOL(zfs_rename);
EXPORT_SYMBOL(zfs_symlink);
EXPORT_SYMBOL(zfs_readlink);
EXPORT_SYMBOL(zfs_link);
EXPORT_SYMBOL(zfs_inactive);
EXPORT_SYMBOL(zfs_space);
EXPORT_SYMBOL(zfs_fid);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
EXPORT_SYMBOL(zfs_getpage);
EXPORT_SYMBOL(zfs_putpage);
EXPORT_SYMBOL(zfs_dirty_inode);
EXPORT_SYMBOL(zfs_map);

module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
module_param(zfs_read_chunk_size, long, 0644);
MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
#endif